summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--deps/v8/.DEPS.git5
-rw-r--r--deps/v8/.gitignore2
-rw-r--r--deps/v8/AUTHORS1
-rw-r--r--deps/v8/BUILD.gn231
-rw-r--r--deps/v8/ChangeLog256
-rw-r--r--deps/v8/DEPS7
-rw-r--r--deps/v8/Makefile4
-rw-r--r--deps/v8/Makefile.android6
-rw-r--r--deps/v8/PRESUBMIT.py79
-rw-r--r--deps/v8/benchmarks/v8.json2
-rw-r--r--deps/v8/build/all.gyp6
-rw-r--r--deps/v8/build/android.gypi1
-rwxr-xr-xdeps/v8/build/get_landmines.py1
-rw-r--r--deps/v8/build/standalone.gypi30
-rw-r--r--deps/v8/build/toolchain.gypi138
-rw-r--r--deps/v8/include/v8-debug.h3
-rw-r--r--deps/v8/include/v8.h645
-rw-r--r--deps/v8/include/v8config.h58
-rw-r--r--deps/v8/samples/lineprocessor.cc1
-rw-r--r--deps/v8/samples/process.cc1
-rw-r--r--deps/v8/samples/shell.cc1
-rw-r--r--deps/v8/src/DEPS1
-rw-r--r--deps/v8/src/accessors.cc195
-rw-r--r--deps/v8/src/accessors.h53
-rw-r--r--deps/v8/src/allocation.cc4
-rw-r--r--deps/v8/src/api.cc770
-rw-r--r--deps/v8/src/api.h8
-rw-r--r--deps/v8/src/apinatives.js2
-rw-r--r--deps/v8/src/arguments.h8
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h153
-rw-r--r--deps/v8/src/arm/assembler-arm.cc130
-rw-r--r--deps/v8/src/arm/assembler-arm.h9
-rw-r--r--deps/v8/src/arm/builtins-arm.cc15
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc858
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h221
-rw-r--r--deps/v8/src/arm/codegen-arm.cc8
-rw-r--r--deps/v8/src/arm/codegen-arm.h2
-rw-r--r--deps/v8/src/arm/constants-arm.h2
-rw-r--r--deps/v8/src/arm/debug-arm.cc16
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc437
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc323
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.h26
-rw-r--r--deps/v8/src/arm/lithium-arm.cc73
-rw-r--r--deps/v8/src/arm/lithium-arm.h476
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc527
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h33
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.h2
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc165
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h78
-rw-r--r--deps/v8/src/arm/regexp-macro-assembler-arm.cc28
-rw-r--r--deps/v8/src/arm/regexp-macro-assembler-arm.h2
-rw-r--r--deps/v8/src/arm/simulator-arm.cc6
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h2
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc28
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h31
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc7
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc893
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.h218
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc8
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h2
-rw-r--r--deps/v8/src/arm64/debug-arm64.cc16
-rw-r--r--deps/v8/src/arm64/decoder-arm64.cc4
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc2
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc4
-rw-r--r--deps/v8/src/arm64/full-codegen-arm64.cc380
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc4
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h6
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc2
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc368
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.h26
-rw-r--r--deps/v8/src/arm64/lithium-arm64.cc130
-rw-r--r--deps/v8/src/arm64/lithium-arm64.h512
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.cc649
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.h81
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h9
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc275
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h99
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.cc26
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.h2
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc651
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h171
-rw-r--r--deps/v8/src/array-iterator.js2
-rw-r--r--deps/v8/src/array.js29
-rw-r--r--deps/v8/src/assembler.cc50
-rw-r--r--deps/v8/src/assembler.h26
-rw-r--r--deps/v8/src/assert-scope.cc148
-rw-r--r--deps/v8/src/assert-scope.h115
-rw-r--r--deps/v8/src/ast-value-factory.cc10
-rw-r--r--deps/v8/src/ast-value-factory.h59
-rw-r--r--deps/v8/src/ast.cc141
-rw-r--r--deps/v8/src/ast.h1084
-rw-r--r--deps/v8/src/background-parsing-task.cc62
-rw-r--r--deps/v8/src/background-parsing-task.h67
-rw-r--r--deps/v8/src/bailout-reason.cc20
-rw-r--r--deps/v8/src/bailout-reason.h339
-rw-r--r--deps/v8/src/base/atomicops_internals_mips_gcc.h37
-rw-r--r--deps/v8/src/base/base.gyp (renamed from deps/v8/test/base-unittests/base-unittests.gyp)5
-rw-r--r--deps/v8/src/base/bits-unittest.cc167
-rw-r--r--deps/v8/src/base/bits.cc25
-rw-r--r--deps/v8/src/base/bits.h150
-rw-r--r--deps/v8/src/base/build_config.h12
-rw-r--r--deps/v8/src/base/compiler-specific.h58
-rw-r--r--deps/v8/src/base/cpu-unittest.cc (renamed from deps/v8/test/base-unittests/cpu-unittest.cc)0
-rw-r--r--deps/v8/src/base/cpu.cc33
-rw-r--r--deps/v8/src/base/cpu.h6
-rw-r--r--deps/v8/src/base/division-by-constant-unittest.cc132
-rw-r--r--deps/v8/src/base/division-by-constant.cc115
-rw-r--r--deps/v8/src/base/division-by-constant.h45
-rw-r--r--deps/v8/src/base/flags-unittest.cc104
-rw-r--r--deps/v8/src/base/flags.h108
-rw-r--r--deps/v8/src/base/logging.cc4
-rw-r--r--deps/v8/src/base/macros.h290
-rw-r--r--deps/v8/src/base/platform/condition-variable-unittest.cc (renamed from deps/v8/test/base-unittests/platform/condition-variable-unittest.cc)24
-rw-r--r--deps/v8/src/base/platform/condition-variable.h8
-rw-r--r--deps/v8/src/base/platform/elapsed-timer.h2
-rw-r--r--deps/v8/src/base/platform/mutex-unittest.cc (renamed from deps/v8/test/base-unittests/platform/mutex-unittest.cc)0
-rw-r--r--deps/v8/src/base/platform/mutex.h10
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc2
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc18
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc34
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc2
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc103
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc2
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc2
-rw-r--r--deps/v8/src/base/platform/platform-unittest.cc (renamed from deps/v8/test/base-unittests/platform/platform-unittest.cc)39
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc31
-rw-r--r--deps/v8/src/base/platform/platform.h11
-rw-r--r--deps/v8/src/base/platform/semaphore-unittest.cc145
-rw-r--r--deps/v8/src/base/platform/semaphore.cc13
-rw-r--r--deps/v8/src/base/platform/semaphore.h4
-rw-r--r--deps/v8/src/base/platform/time-unittest.cc (renamed from deps/v8/test/base-unittests/platform/time-unittest.cc)2
-rw-r--r--deps/v8/src/base/platform/time.cc14
-rw-r--r--deps/v8/src/base/platform/time.h6
-rw-r--r--deps/v8/src/base/sys-info-unittest.cc32
-rw-r--r--deps/v8/src/base/sys-info.cc125
-rw-r--r--deps/v8/src/base/sys-info.h30
-rw-r--r--deps/v8/src/base/utils/random-number-generator-unittest.cc (renamed from deps/v8/test/base-unittests/utils/random-number-generator-unittest.cc)0
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc3
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h15
-rw-r--r--deps/v8/src/base/win32-headers.h2
-rw-r--r--deps/v8/src/basic-block-profiler.cc112
-rw-r--r--deps/v8/src/basic-block-profiler.h73
-rw-r--r--deps/v8/src/bootstrapper.cc316
-rw-r--r--deps/v8/src/bootstrapper.h18
-rw-r--r--deps/v8/src/builtins.cc53
-rw-r--r--deps/v8/src/builtins.h160
-rw-r--r--deps/v8/src/cached-powers.cc2
-rw-r--r--deps/v8/src/code-factory.cc92
-rw-r--r--deps/v8/src/code-factory.h61
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc249
-rw-r--r--deps/v8/src/code-stubs.cc638
-rw-r--r--deps/v8/src/code-stubs.h1866
-rw-r--r--deps/v8/src/codegen.cc34
-rw-r--r--deps/v8/src/codegen.h2
-rw-r--r--deps/v8/src/collection.js18
-rw-r--r--deps/v8/src/compilation-cache.cc2
-rw-r--r--deps/v8/src/compiler-intrinsics.h73
-rw-r--r--deps/v8/src/compiler.cc760
-rw-r--r--deps/v8/src/compiler.h267
-rw-r--r--deps/v8/src/compiler/access-builder.cc90
-rw-r--r--deps/v8/src/compiler/access-builder.h52
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc324
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h29
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm-unittest.cc1927
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc343
-rw-r--r--deps/v8/src/compiler/arm/linkage-arm.cc37
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc269
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h55
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64-unittest.cc1397
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc565
-rw-r--r--deps/v8/src/compiler/arm64/linkage-arm64.cc36
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc437
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h74
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc103
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h32
-rw-r--r--deps/v8/src/compiler/change-lowering-unittest.cc476
-rw-r--r--deps/v8/src/compiler/change-lowering.cc316
-rw-r--r--deps/v8/src/compiler/change-lowering.h79
-rw-r--r--deps/v8/src/compiler/code-generator.cc211
-rw-r--r--deps/v8/src/compiler/code-generator.h70
-rw-r--r--deps/v8/src/compiler/common-node-cache.h6
-rw-r--r--deps/v8/src/compiler/common-operator-unittest.cc183
-rw-r--r--deps/v8/src/compiler/common-operator.cc252
-rw-r--r--deps/v8/src/compiler/common-operator.h333
-rw-r--r--deps/v8/src/compiler/compiler-test-utils.h (renamed from deps/v8/test/compiler-unittests/compiler-unittests.h)38
-rw-r--r--deps/v8/src/compiler/compiler.gyp60
-rw-r--r--deps/v8/src/compiler/gap-resolver.h2
-rw-r--r--deps/v8/src/compiler/generic-algorithm.h22
-rw-r--r--deps/v8/src/compiler/generic-node-inl.h17
-rw-r--r--deps/v8/src/compiler/generic-node.h31
-rw-r--r--deps/v8/src/compiler/graph-builder.cc38
-rw-r--r--deps/v8/src/compiler/graph-builder.h42
-rw-r--r--deps/v8/src/compiler/graph-inl.h8
-rw-r--r--deps/v8/src/compiler/graph-reducer-unittest.cc115
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc28
-rw-r--r--deps/v8/src/compiler/graph-reducer.h25
-rw-r--r--deps/v8/src/compiler/graph-replay.cc2
-rw-r--r--deps/v8/src/compiler/graph-replay.h15
-rw-r--r--deps/v8/src/compiler/graph-unittest.cc (renamed from deps/v8/test/compiler-unittests/node-matchers.cc)443
-rw-r--r--deps/v8/src/compiler/graph-unittest.h143
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc214
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h7
-rw-r--r--deps/v8/src/compiler/graph.cc31
-rw-r--r--deps/v8/src/compiler/graph.h44
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc340
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h70
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32-unittest.cc429
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc369
-rw-r--r--deps/v8/src/compiler/ia32/linkage-ia32.cc36
-rw-r--r--deps/v8/src/compiler/instruction-codes.h12
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h79
-rw-r--r--deps/v8/src/compiler/instruction-selector-unittest.cc512
-rw-r--r--deps/v8/src/compiler/instruction-selector-unittest.h213
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc342
-rw-r--r--deps/v8/src/compiler/instruction-selector.h20
-rw-r--r--deps/v8/src/compiler/instruction.cc15
-rw-r--r--deps/v8/src/compiler/instruction.h172
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer-unittest.cc236
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc218
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h47
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc54
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc457
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h34
-rw-r--r--deps/v8/src/compiler/js-graph.cc34
-rw-r--r--deps/v8/src/compiler/js-graph.h30
-rw-r--r--deps/v8/src/compiler/js-inlining.cc446
-rw-r--r--deps/v8/src/compiler/js-inlining.h40
-rw-r--r--deps/v8/src/compiler/js-operator.h137
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc237
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h49
-rw-r--r--deps/v8/src/compiler/linkage-impl.h322
-rw-r--r--deps/v8/src/compiler/linkage.cc59
-rw-r--r--deps/v8/src/compiler/linkage.h173
-rw-r--r--deps/v8/src/compiler/lowering-builder.cc45
-rw-r--r--deps/v8/src/compiler/lowering-builder.h38
-rw-r--r--deps/v8/src/compiler/machine-node-factory.h381
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer-unittest.cc659
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc250
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h54
-rw-r--r--deps/v8/src/compiler/machine-operator-unittest.cc325
-rw-r--r--deps/v8/src/compiler/machine-operator.cc244
-rw-r--r--deps/v8/src/compiler/machine-operator.h281
-rw-r--r--deps/v8/src/compiler/machine-type.cc46
-rw-r--r--deps/v8/src/compiler/machine-type.h175
-rw-r--r--deps/v8/src/compiler/node-aux-data-inl.h2
-rw-r--r--deps/v8/src/compiler/node-aux-data.h9
-rw-r--r--deps/v8/src/compiler/node-cache.cc2
-rw-r--r--deps/v8/src/compiler/node-matchers.h157
-rw-r--r--deps/v8/src/compiler/node-properties-inl.h49
-rw-r--r--deps/v8/src/compiler/node-properties.h9
-rw-r--r--deps/v8/src/compiler/node.cc26
-rw-r--r--deps/v8/src/compiler/node.h41
-rw-r--r--deps/v8/src/compiler/opcodes.h133
-rw-r--r--deps/v8/src/compiler/operator-properties-inl.h186
-rw-r--r--deps/v8/src/compiler/operator-properties.h57
-rw-r--r--deps/v8/src/compiler/operator.cc26
-rw-r--r--deps/v8/src/compiler/operator.h170
-rw-r--r--deps/v8/src/compiler/phi-reducer.h4
-rw-r--r--deps/v8/src/compiler/pipeline.cc192
-rw-r--r--deps/v8/src/compiler/pipeline.h17
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc77
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h376
-rw-r--r--deps/v8/src/compiler/register-allocator.cc11
-rw-r--r--deps/v8/src/compiler/representation-change.h377
-rw-r--r--deps/v8/src/compiler/schedule.cc4
-rw-r--r--deps/v8/src/compiler/schedule.h92
-rw-r--r--deps/v8/src/compiler/scheduler.cc792
-rw-r--r--deps/v8/src/compiler/scheduler.h65
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc674
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h30
-rw-r--r--deps/v8/src/compiler/simplified-node-factory.h128
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer-unittest.cc483
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc147
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h53
-rw-r--r--deps/v8/src/compiler/simplified-operator-unittest.cc222
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc178
-rw-r--r--deps/v8/src/compiler/simplified-operator.h193
-rw-r--r--deps/v8/src/compiler/source-position.h4
-rw-r--r--deps/v8/src/compiler/structured-machine-assembler.cc664
-rw-r--r--deps/v8/src/compiler/structured-machine-assembler.h311
-rw-r--r--deps/v8/src/compiler/typer.cc202
-rw-r--r--deps/v8/src/compiler/typer.h2
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer-unittest.cc120
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.cc74
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.h36
-rw-r--r--deps/v8/src/compiler/verifier.cc236
-rw-r--r--deps/v8/src/compiler/verifier.h13
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc398
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h75
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64-unittest.cc294
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc440
-rw-r--r--deps/v8/src/compiler/x64/linkage-x64.cc35
-rw-r--r--deps/v8/src/contexts.cc2
-rw-r--r--deps/v8/src/contexts.h26
-rw-r--r--deps/v8/src/conversions-inl.h13
-rw-r--r--deps/v8/src/conversions.cc4
-rw-r--r--deps/v8/src/conversions.h10
-rw-r--r--deps/v8/src/counters.h259
-rw-r--r--deps/v8/src/d8-posix.cc17
-rw-r--r--deps/v8/src/d8.cc69
-rw-r--r--deps/v8/src/d8.h3
-rw-r--r--deps/v8/src/data-flow.cc17
-rw-r--r--deps/v8/src/data-flow.h26
-rw-r--r--deps/v8/src/date.h14
-rw-r--r--deps/v8/src/debug-debugger.js6
-rw-r--r--deps/v8/src/debug.cc144
-rw-r--r--deps/v8/src/debug.h29
-rw-r--r--deps/v8/src/deoptimizer.cc110
-rw-r--r--deps/v8/src/deoptimizer.h65
-rw-r--r--deps/v8/src/disassembler.cc72
-rw-r--r--deps/v8/src/disassembler.h18
-rw-r--r--deps/v8/src/double.h4
-rw-r--r--deps/v8/src/elements-kind.h5
-rw-r--r--deps/v8/src/elements.cc245
-rw-r--r--deps/v8/src/elements.h25
-rw-r--r--deps/v8/src/execution.cc80
-rw-r--r--deps/v8/src/execution.h18
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc16
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.h2
-rw-r--r--deps/v8/src/factory.cc245
-rw-r--r--deps/v8/src/factory.h178
-rw-r--r--deps/v8/src/field-index-inl.h5
-rw-r--r--deps/v8/src/field-index.cc23
-rw-r--r--deps/v8/src/field-index.h6
-rw-r--r--deps/v8/src/flag-definitions.h60
-rw-r--r--deps/v8/src/frames.cc25
-rw-r--r--deps/v8/src/full-codegen.cc46
-rw-r--r--deps/v8/src/full-codegen.h13
-rw-r--r--deps/v8/src/gdb-jit.cc14
-rw-r--r--deps/v8/src/generator.js13
-rw-r--r--deps/v8/src/global-handles.cc23
-rw-r--r--deps/v8/src/global-handles.h7
-rw-r--r--deps/v8/src/globals.h56
-rw-r--r--deps/v8/src/handles-inl.h4
-rw-r--r--deps/v8/src/harmony-array.js18
-rw-r--r--deps/v8/src/harmony-classes.js32
-rw-r--r--deps/v8/src/hashmap.h5
-rw-r--r--deps/v8/src/heap-snapshot-generator-inl.h19
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc161
-rw-r--r--deps/v8/src/heap-snapshot-generator.h26
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler-unittest.cc348
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc174
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h188
-rw-r--r--deps/v8/src/heap/gc-tracer.cc82
-rw-r--r--deps/v8/src/heap/gc-tracer.h53
-rw-r--r--deps/v8/src/heap/heap-inl.h37
-rw-r--r--deps/v8/src/heap/heap.cc291
-rw-r--r--deps/v8/src/heap/heap.gyp (renamed from deps/v8/test/compiler-unittests/compiler-unittests.gyp)17
-rw-r--r--deps/v8/src/heap/heap.h119
-rw-r--r--deps/v8/src/heap/incremental-marking.cc139
-rw-r--r--deps/v8/src/heap/incremental-marking.h4
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h3
-rw-r--r--deps/v8/src/heap/mark-compact.cc360
-rw-r--r--deps/v8/src/heap/mark-compact.h24
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h6
-rw-r--r--deps/v8/src/heap/objects-visiting.cc1
-rw-r--r--deps/v8/src/heap/spaces-inl.h5
-rw-r--r--deps/v8/src/heap/spaces.cc115
-rw-r--r--deps/v8/src/heap/spaces.h101
-rw-r--r--deps/v8/src/heap/store-buffer.cc36
-rw-r--r--deps/v8/src/hydrogen-gvn.cc4
-rw-r--r--deps/v8/src/hydrogen-gvn.h6
-rw-r--r--deps/v8/src/hydrogen-instructions.cc66
-rw-r--r--deps/v8/src/hydrogen-instructions.h1496
-rw-r--r--deps/v8/src/hydrogen-removable-simulates.cc7
-rw-r--r--deps/v8/src/hydrogen-types.cc5
-rw-r--r--deps/v8/src/hydrogen-types.h16
-rw-r--r--deps/v8/src/hydrogen.cc408
-rw-r--r--deps/v8/src/hydrogen.h142
-rw-r--r--deps/v8/src/i18n.cc273
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc17
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h9
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc15
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc763
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h191
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc18
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h2
-rw-r--r--deps/v8/src/ia32/debug-ia32.cc16
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc2
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc352
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc304
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc538
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h28
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.cc2
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.h2
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc81
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h468
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc184
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h69
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.cc28
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.h2
-rw-r--r--deps/v8/src/ic/access-compiler.cc55
-rw-r--r--deps/v8/src/ic/access-compiler.h83
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc46
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc (renamed from deps/v8/src/arm/stub-cache-arm.cc)721
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc (renamed from deps/v8/src/arm/ic-arm.cc)457
-rw-r--r--deps/v8/src/ic/arm/ic-compiler-arm.cc130
-rw-r--r--deps/v8/src/ic/arm/stub-cache-arm.cc175
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc53
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc (renamed from deps/v8/src/arm64/stub-cache-arm64.cc)790
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc (renamed from deps/v8/src/arm64/ic-arm64.cc)530
-rw-r--r--deps/v8/src/ic/arm64/ic-compiler-arm64.cc133
-rw-r--r--deps/v8/src/ic/arm64/stub-cache-arm64.cc149
-rw-r--r--deps/v8/src/ic/call-optimization.cc113
-rw-r--r--deps/v8/src/ic/call-optimization.h62
-rw-r--r--deps/v8/src/ic/handler-compiler.cc410
-rw-r--r--deps/v8/src/ic/handler-compiler.h275
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc44
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc (renamed from deps/v8/src/ia32/stub-cache-ia32.cc)807
-rw-r--r--deps/v8/src/ic/ia32/ic-compiler-ia32.cc128
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc (renamed from deps/v8/src/ia32/ic-ia32.cc)433
-rw-r--r--deps/v8/src/ic/ia32/stub-cache-ia32.cc189
-rw-r--r--deps/v8/src/ic/ic-compiler.cc447
-rw-r--r--deps/v8/src/ic/ic-compiler.h125
-rw-r--r--deps/v8/src/ic/ic-inl.h (renamed from deps/v8/src/ic-inl.h)62
-rw-r--r--deps/v8/src/ic/ic-state.cc614
-rw-r--r--deps/v8/src/ic/ic-state.h238
-rw-r--r--deps/v8/src/ic/ic.cc (renamed from deps/v8/src/ic.cc)1968
-rw-r--r--deps/v8/src/ic/ic.h (renamed from deps/v8/src/ic.h)543
-rw-r--r--deps/v8/src/ic/mips/OWNERS5
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc46
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc (renamed from deps/v8/src/mips/stub-cache-mips.cc)758
-rw-r--r--deps/v8/src/ic/mips/ic-compiler-mips.cc131
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc (renamed from deps/v8/src/mips/ic-mips.cc)489
-rw-r--r--deps/v8/src/ic/mips/stub-cache-mips.cc169
-rw-r--r--deps/v8/src/ic/mips64/OWNERS5
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc46
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc (renamed from deps/v8/src/mips64/stub-cache-mips64.cc)763
-rw-r--r--deps/v8/src/ic/mips64/ic-compiler-mips64.cc131
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc (renamed from deps/v8/src/mips64/ic-mips64.cc)491
-rw-r--r--deps/v8/src/ic/mips64/stub-cache-mips64.cc170
-rw-r--r--deps/v8/src/ic/stub-cache.cc147
-rw-r--r--deps/v8/src/ic/stub-cache.h171
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc46
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc (renamed from deps/v8/src/x64/stub-cache-x64.cc)739
-rw-r--r--deps/v8/src/ic/x64/ic-compiler-x64.cc137
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc (renamed from deps/v8/src/x64/ic-x64.cc)493
-rw-r--r--deps/v8/src/ic/x64/stub-cache-x64.cc153
-rw-r--r--deps/v8/src/ic/x87/OWNERS1
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc44
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc (renamed from deps/v8/src/x87/stub-cache-x87.cc)802
-rw-r--r--deps/v8/src/ic/x87/ic-compiler-x87.cc128
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc (renamed from deps/v8/src/x87/ic-x87.cc)433
-rw-r--r--deps/v8/src/ic/x87/stub-cache-x87.cc189
-rw-r--r--deps/v8/src/interface-descriptors.cc143
-rw-r--r--deps/v8/src/interface-descriptors.h486
-rw-r--r--deps/v8/src/interpreter-irregexp.cc2
-rw-r--r--deps/v8/src/isolate.cc227
-rw-r--r--deps/v8/src/isolate.h97
-rw-r--r--deps/v8/src/json-parser.h69
-rw-r--r--deps/v8/src/json-stringifier.h78
-rw-r--r--deps/v8/src/jsregexp.cc735
-rw-r--r--deps/v8/src/jsregexp.h120
-rw-r--r--deps/v8/src/libplatform/default-platform-unittest.cc43
-rw-r--r--deps/v8/src/libplatform/default-platform.cc6
-rw-r--r--deps/v8/src/libplatform/default-platform.h4
-rw-r--r--deps/v8/src/libplatform/libplatform.gyp39
-rw-r--r--deps/v8/src/libplatform/task-queue-unittest.cc60
-rw-r--r--deps/v8/src/libplatform/worker-thread-unittest.cc48
-rw-r--r--deps/v8/src/libplatform/worker-thread.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc8
-rw-r--r--deps/v8/src/lithium-codegen.cc23
-rw-r--r--deps/v8/src/lithium-codegen.h4
-rw-r--r--deps/v8/src/lithium.cc42
-rw-r--r--deps/v8/src/lithium.h44
-rw-r--r--deps/v8/src/liveedit.cc18
-rw-r--r--deps/v8/src/liveedit.h2
-rw-r--r--deps/v8/src/log.cc7
-rw-r--r--deps/v8/src/lookup-inl.h45
-rw-r--r--deps/v8/src/lookup.cc263
-rw-r--r--deps/v8/src/lookup.h176
-rw-r--r--deps/v8/src/macros.py6
-rw-r--r--deps/v8/src/math.js51
-rw-r--r--deps/v8/src/messages.cc6
-rw-r--r--deps/v8/src/messages.js50
-rw-r--r--deps/v8/src/mips/OWNERS5
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h5
-rw-r--r--deps/v8/src/mips/assembler-mips.cc517
-rw-r--r--deps/v8/src/mips/assembler-mips.h139
-rw-r--r--deps/v8/src/mips/builtins-mips.cc14
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc943
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h238
-rw-r--r--deps/v8/src/mips/codegen-mips.cc11
-rw-r--r--deps/v8/src/mips/codegen-mips.h2
-rw-r--r--deps/v8/src/mips/constants-mips.cc5
-rw-r--r--deps/v8/src/mips/constants-mips.h128
-rw-r--r--deps/v8/src/mips/debug-mips.cc16
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc72
-rw-r--r--deps/v8/src/mips/disasm-mips.cc408
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc383
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc303
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc562
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h35
-rw-r--r--deps/v8/src/mips/lithium-gap-resolver-mips.h2
-rw-r--r--deps/v8/src/mips/lithium-mips.cc80
-rw-r--r--deps/v8/src/mips/lithium-mips.h473
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc822
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h124
-rw-r--r--deps/v8/src/mips/regexp-macro-assembler-mips.cc34
-rw-r--r--deps/v8/src/mips/regexp-macro-assembler-mips.h2
-rw-r--r--deps/v8/src/mips/simulator-mips.cc648
-rw-r--r--deps/v8/src/mips/simulator-mips.h14
-rw-r--r--deps/v8/src/mips64/OWNERS5
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h5
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc2
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h4
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc7
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc877
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.h239
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc8
-rw-r--r--deps/v8/src/mips64/codegen-mips64.h2
-rw-r--r--deps/v8/src/mips64/debug-mips64.cc16
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc2
-rw-r--r--deps/v8/src/mips64/full-codegen-mips64.cc370
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc303
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.cc520
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.h35
-rw-r--r--deps/v8/src/mips64/lithium-gap-resolver-mips64.h2
-rw-r--r--deps/v8/src/mips64/lithium-mips64.cc75
-rw-r--r--deps/v8/src/mips64/lithium-mips64.h467
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc166
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h74
-rw-r--r--deps/v8/src/mips64/regexp-macro-assembler-mips64.cc34
-rw-r--r--deps/v8/src/mips64/regexp-macro-assembler-mips64.h2
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc23
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h2
-rw-r--r--deps/v8/src/mirror-debugger.js107
-rw-r--r--deps/v8/src/mksnapshot.cc30
-rw-r--r--deps/v8/src/msan.h13
-rw-r--r--deps/v8/src/natives-external.cc4
-rw-r--r--deps/v8/src/objects-debug.cc22
-rw-r--r--deps/v8/src/objects-inl.h226
-rw-r--r--deps/v8/src/objects-printer.cc14
-rw-r--r--deps/v8/src/objects.cc2258
-rw-r--r--deps/v8/src/objects.h1411
-rw-r--r--deps/v8/src/optimizing-compiler-thread.cc7
-rw-r--r--deps/v8/src/ostreams.cc30
-rw-r--r--deps/v8/src/ostreams.h8
-rw-r--r--deps/v8/src/parser.cc513
-rw-r--r--deps/v8/src/parser.h133
-rw-r--r--deps/v8/src/perf-jit.cc3
-rw-r--r--deps/v8/src/preparser.cc78
-rw-r--r--deps/v8/src/preparser.h613
-rw-r--r--deps/v8/src/prettyprinter.cc47
-rw-r--r--deps/v8/src/prettyprinter.h1
-rw-r--r--deps/v8/src/promise.js19
-rw-r--r--deps/v8/src/property-details-inl.h12
-rw-r--r--deps/v8/src/property-details.h39
-rw-r--r--deps/v8/src/property.cc30
-rw-r--r--deps/v8/src/property.h266
-rw-r--r--deps/v8/src/regexp-macro-assembler-irregexp.h1
-rw-r--r--deps/v8/src/regexp-macro-assembler.cc17
-rw-r--r--deps/v8/src/regexp-macro-assembler.h4
-rw-r--r--deps/v8/src/regexp.js35
-rw-r--r--deps/v8/src/rewriter.cc6
-rw-r--r--deps/v8/src/runtime.h913
-rw-r--r--deps/v8/src/runtime.js8
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc347
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc441
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc751
-rw-r--r--deps/v8/src/runtime/runtime-json.cc54
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc247
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc565
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc1131
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc1260
-rw-r--r--deps/v8/src/runtime/runtime-test.cc323
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc760
-rw-r--r--deps/v8/src/runtime/runtime-uri.cc (renamed from deps/v8/src/uri.h)177
-rw-r--r--deps/v8/src/runtime/runtime-utils.h146
-rw-r--r--deps/v8/src/runtime/runtime.cc (renamed from deps/v8/src/runtime.cc)8173
-rw-r--r--deps/v8/src/runtime/runtime.h907
-rw-r--r--deps/v8/src/runtime/string-builder.h296
-rw-r--r--deps/v8/src/safepoint-table.cc2
-rw-r--r--deps/v8/src/safepoint-table.h7
-rw-r--r--deps/v8/src/sampler.cc23
-rw-r--r--deps/v8/src/scanner-character-streams.cc190
-rw-r--r--deps/v8/src/scanner-character-streams.h43
-rw-r--r--deps/v8/src/scanner.cc154
-rw-r--r--deps/v8/src/scanner.h12
-rw-r--r--deps/v8/src/scopeinfo.cc10
-rw-r--r--deps/v8/src/scopes.cc22
-rw-r--r--deps/v8/src/scopes.h9
-rw-r--r--deps/v8/src/serialize.cc411
-rw-r--r--deps/v8/src/serialize.h121
-rw-r--r--deps/v8/src/snapshot-common.cc9
-rw-r--r--deps/v8/src/snapshot-empty.cc3
-rw-r--r--deps/v8/src/snapshot-external.cc9
-rw-r--r--deps/v8/src/snapshot-source-sink.cc10
-rw-r--r--deps/v8/src/snapshot-source-sink.h12
-rw-r--r--deps/v8/src/snapshot.h8
-rw-r--r--deps/v8/src/string-iterator.js5
-rw-r--r--deps/v8/src/string-search.h6
-rw-r--r--deps/v8/src/string-stream.h10
-rw-r--r--deps/v8/src/string.js11
-rw-r--r--deps/v8/src/strtod.cc2
-rw-r--r--deps/v8/src/stub-cache.cc1293
-rw-r--r--deps/v8/src/stub-cache.h684
-rw-r--r--deps/v8/src/test/DEPS3
-rw-r--r--deps/v8/src/test/run-all-unittests.cc45
-rw-r--r--deps/v8/src/test/test-utils.cc58
-rw-r--r--deps/v8/src/test/test-utils.h86
-rw-r--r--deps/v8/src/test/test.gyp71
-rw-r--r--deps/v8/src/third_party/vtune/DEPS3
-rw-r--r--deps/v8/src/third_party/vtune/v8-vtune.h4
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc11
-rw-r--r--deps/v8/src/token.h4
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h45
-rw-r--r--deps/v8/src/type-feedback-vector.cc22
-rw-r--r--deps/v8/src/type-feedback-vector.h55
-rw-r--r--deps/v8/src/type-info.cc36
-rw-r--r--deps/v8/src/type-info.h7
-rw-r--r--deps/v8/src/types-inl.h25
-rw-r--r--deps/v8/src/types.cc998
-rw-r--r--deps/v8/src/types.h425
-rw-r--r--deps/v8/src/typing.cc6
-rw-r--r--deps/v8/src/unique.h76
-rw-r--r--deps/v8/src/uri.js34
-rw-r--r--deps/v8/src/utils.h144
-rw-r--r--deps/v8/src/v8.cc13
-rw-r--r--deps/v8/src/v8.h8
-rw-r--r--deps/v8/src/v8natives.js38
-rw-r--r--deps/v8/src/v8threads.cc3
-rw-r--r--deps/v8/src/vector.h4
-rw-r--r--deps/v8/src/version.cc6
-rw-r--r--deps/v8/src/weak-collection.js (renamed from deps/v8/src/weak_collection.js)4
-rw-r--r--deps/v8/src/x64/assembler-x64.cc47
-rw-r--r--deps/v8/src/x64/assembler-x64.h8
-rw-r--r--deps/v8/src/x64/builtins-x64.cc15
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc768
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h195
-rw-r--r--deps/v8/src/x64/codegen-x64.cc22
-rw-r--r--deps/v8/src/x64/codegen-x64.h2
-rw-r--r--deps/v8/src/x64/debug-x64.cc16
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc2
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc355
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc305
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc532
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h28
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.cc2
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.h2
-rw-r--r--deps/v8/src/x64/lithium-x64.cc78
-rw-r--r--deps/v8/src/x64/lithium-x64.h472
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc183
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h70
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc30
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.h2
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h2
-rw-r--r--deps/v8/src/x87/assembler-x87.cc57
-rw-r--r--deps/v8/src/x87/assembler-x87.h19
-rw-r--r--deps/v8/src/x87/builtins-x87.cc25
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc774
-rw-r--r--deps/v8/src/x87/code-stubs-x87.h210
-rw-r--r--deps/v8/src/x87/codegen-x87.cc71
-rw-r--r--deps/v8/src/x87/codegen-x87.h2
-rw-r--r--deps/v8/src/x87/debug-x87.cc16
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc78
-rw-r--r--deps/v8/src/x87/disasm-x87.cc23
-rw-r--r--deps/v8/src/x87/full-codegen-x87.cc395
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc304
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.cc1212
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.h54
-rw-r--r--deps/v8/src/x87/lithium-gap-resolver-x87.cc15
-rw-r--r--deps/v8/src/x87/lithium-gap-resolver-x87.h2
-rw-r--r--deps/v8/src/x87/lithium-x87.cc155
-rw-r--r--deps/v8/src/x87/lithium-x87.h794
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc348
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h132
-rw-r--r--deps/v8/src/x87/regexp-macro-assembler-x87.cc28
-rw-r--r--deps/v8/src/x87/regexp-macro-assembler-x87.h2
-rw-r--r--deps/v8/src/zone-containers.h47
-rw-r--r--deps/v8/src/zone.h4
-rw-r--r--deps/v8/test/base-unittests/DEPS8
-rw-r--r--deps/v8/test/base-unittests/testcfg.py51
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status4
-rw-r--r--deps/v8/test/benchmarks/testcfg.py3
-rw-r--r--deps/v8/test/cctest/OWNERS7
-rw-r--r--deps/v8/test/cctest/cctest.cc29
-rw-r--r--deps/v8/test/cctest/cctest.gyp13
-rw-r--r--deps/v8/test/cctest/cctest.h7
-rw-r--r--deps/v8/test/cctest/cctest.status72
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h133
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h103
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc22
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h85
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h47
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.cc35
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h31
-rw-r--r--deps/v8/test/cctest/compiler/instruction-selector-tester.h10
-rw-r--r--deps/v8/test/cctest/compiler/simplified-graph-builder.cc94
-rw-r--r--deps/v8/test/cctest/compiler/simplified-graph-builder.h137
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc115
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc45
-rw-r--r--deps/v8/test/cctest/compiler/test-changes-lowering.cc196
-rw-r--r--deps/v8/test/cctest/compiler/test-codegen-deopt.cc145
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-reducer.cc50
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction-selector-arm.cc1863
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction-selector-ia32.cc66
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction-selector.cc22
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc28
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc21
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc38
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc265
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc104
-rw-r--r--deps/v8/test/cctest/compiler/test-node-cache.cc12
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc30
-rw-r--r--deps/v8/test/cctest/compiler/test-operator.cc86
-rw-r--r--deps/v8/test/cctest/compiler/test-phi-reducer.cc39
-rw-r--r--deps/v8/test/cctest/compiler/test-pipeline.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc503
-rw-r--r--deps/v8/test/cctest/compiler/test-run-deopt.cc26
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc353
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsbranches.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc54
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc1544
-rw-r--r--deps/v8/test/cctest/compiler/test-run-properties.cc141
-rw-r--r--deps/v8/test/cctest/compiler/test-schedule.cc34
-rw-r--r--deps/v8/test/cctest/compiler/test-scheduler.cc490
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc643
-rw-r--r--deps/v8/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc667
-rw-r--r--deps/v8/test/cctest/compiler/test-structured-machine-assembler.cc1055
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h67
-rw-r--r--deps/v8/test/cctest/test-alloc.cc6
-rw-r--r--deps/v8/test/cctest/test-api.cc971
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc22
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc52
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc28
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc135
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc204
-rw-r--r--deps/v8/test/cctest/test-ast.cc4
-rw-r--r--deps/v8/test/cctest/test-checks.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc18
-rw-r--r--deps/v8/test/cctest/test-compiler.cc10
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc38
-rw-r--r--deps/v8/test/cctest/test-dataflow.cc1
-rw-r--r--deps/v8/test/cctest/test-date.cc2
-rw-r--r--deps/v8/test/cctest/test-debug.cc265
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc623
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc4
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc3
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc181
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc4
-rw-r--r--deps/v8/test/cctest/test-disasm-x87.cc9
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc2
-rw-r--r--deps/v8/test/cctest/test-hashing.cc153
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc89
-rw-r--r--deps/v8/test/cctest/test-heap.cc451
-rw-r--r--deps/v8/test/cctest/test-libplatform-default-platform.cc30
-rw-r--r--deps/v8/test/cctest/test-libplatform.h123
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc1
-rw-r--r--deps/v8/test/cctest/test-log.cc275
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc2
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-ia32.cc2
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc6
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc24
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x87.cc2
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc32
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc2
-rw-r--r--deps/v8/test/cctest/test-ordered-hash-table.cc27
-rw-r--r--deps/v8/test/cctest/test-parsing.cc850
-rw-r--r--deps/v8/test/cctest/test-platform.cc90
-rw-r--r--deps/v8/test/cctest/test-random-number-generator.cc22
-rw-r--r--deps/v8/test/cctest/test-regexp.cc107
-rw-r--r--deps/v8/test/cctest/test-semaphore.cc156
-rw-r--r--deps/v8/test/cctest/test-serialize.cc738
-rw-r--r--deps/v8/test/cctest/test-spaces.cc35
-rw-r--r--deps/v8/test/cctest/test-strings.cc224
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc12
-rw-r--r--deps/v8/test/cctest/test-types.cc930
-rw-r--r--deps/v8/test/cctest/test-utils.cc2
-rw-r--r--deps/v8/test/compiler-unittests/DEPS6
-rw-r--r--deps/v8/test/compiler-unittests/arm/instruction-selector-arm-unittest.cc27
-rw-r--r--deps/v8/test/compiler-unittests/change-lowering-unittest.cc257
-rw-r--r--deps/v8/test/compiler-unittests/compiler-unittests.cc86
-rw-r--r--deps/v8/test/compiler-unittests/instruction-selector-unittest.cc92
-rw-r--r--deps/v8/test/compiler-unittests/instruction-selector-unittest.h129
-rw-r--r--deps/v8/test/compiler-unittests/node-matchers.h71
-rw-r--r--deps/v8/test/compiler-unittests/testcfg.py51
-rw-r--r--deps/v8/test/fuzz-natives/fuzz-natives.status2
-rw-r--r--deps/v8/test/heap-unittests/heap-unittests.status6
-rw-r--r--deps/v8/test/intl/intl.status3
-rw-r--r--deps/v8/test/libplatform-unittests/libplatform-unittests.status6
-rw-r--r--deps/v8/test/message/message.status3
-rw-r--r--deps/v8/test/mjsunit/array-sort.js44
-rw-r--r--deps/v8/test/mjsunit/asm/int32array-unaligned.js43
-rw-r--r--deps/v8/test/mjsunit/asm/math-abs.js84
-rw-r--r--deps/v8/test/mjsunit/asm/math-fround.js38
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-warm.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-411262.js (renamed from deps/v8/test/cctest/test-libplatform-worker-thread.cc)46
-rw-r--r--deps/v8/test/mjsunit/compiler/shift-shr.js26
-rw-r--r--deps/v8/test/mjsunit/cross-realm-filtering.js69
-rw-r--r--deps/v8/test/mjsunit/debug-backtrace-text.js2
-rw-r--r--deps/v8/test/mjsunit/debug-break-inline.js2
-rw-r--r--deps/v8/test/mjsunit/debug-clearbreakpointgroup.js1
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-closure.js1
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-with.js2
-rw-r--r--deps/v8/test/mjsunit/debug-receiver.js2
-rw-r--r--deps/v8/test/mjsunit/debug-scopes.js2
-rw-r--r--deps/v8/test/mjsunit/debug-script.js2
-rw-r--r--deps/v8/test/mjsunit/debug-step-2.js1
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-property-function-call.js153
-rw-r--r--deps/v8/test/mjsunit/deopt-global-accessor.js23
-rw-r--r--deps/v8/test/mjsunit/es6/arguments-iterator.js230
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator.js9
-rw-r--r--deps/v8/test/mjsunit/es6/collections.js100
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js86
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js87
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js42
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-generators.js45
-rw-r--r--deps/v8/test/mjsunit/es6/generators-debug-liveedit.js (renamed from deps/v8/test/mjsunit/harmony/generators-debug-liveedit.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-debug-scopes.js (renamed from deps/v8/test/mjsunit/harmony/generators-debug-scopes.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-iteration.js (renamed from deps/v8/test/mjsunit/harmony/generators-iteration.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-mirror.js84
-rw-r--r--deps/v8/test/mjsunit/es6/generators-objects.js (renamed from deps/v8/test/mjsunit/harmony/generators-objects.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-parsing.js (renamed from deps/v8/test/mjsunit/harmony/generators-parsing.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-poisoned-properties.js (renamed from deps/v8/test/mjsunit/harmony/generators-poisoned-properties.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-relocation.js (renamed from deps/v8/test/mjsunit/harmony/generators-relocation.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-runtime.js (renamed from deps/v8/test/mjsunit/harmony/generators-runtime.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/iteration-semantics.js2
-rw-r--r--deps/v8/test/mjsunit/es6/math-expm1.js56
-rw-r--r--deps/v8/test/mjsunit/es6/math-hyperbolic.js80
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2681.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2681.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2691.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2691.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-3280.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-3280.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js3
-rw-r--r--deps/v8/test/mjsunit/es6/unscopables.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-of.js164
-rw-r--r--deps/v8/test/mjsunit/harmony/arrow-functions.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/object-literals-method.js248
-rw-r--r--deps/v8/test/mjsunit/harmony/private.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-sticky.js132
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-405844.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/super.js234
-rw-r--r--deps/v8/test/mjsunit/harmony/toMethod.js115
-rw-r--r--deps/v8/test/mjsunit/keyed-named-access.js72
-rw-r--r--deps/v8/test/mjsunit/lithium/SeqStringSetChar.js12
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status114
-rw-r--r--deps/v8/test/mjsunit/new-string-add.js197
-rw-r--r--deps/v8/test/mjsunit/number-literal.js33
-rw-r--r--deps/v8/test/mjsunit/object-literal.js70
-rw-r--r--deps/v8/test/mjsunit/regexp-not-sticky-yet.js (renamed from deps/v8/test/cctest/test-libplatform-task-queue.cc)93
-rw-r--r--deps/v8/test/mjsunit/regress-3225.js2
-rw-r--r--deps/v8/test/mjsunit/regress/poly_count_operation.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1170187.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-119609.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-131994.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-325676.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3564.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-403292.js53
-rw-r--r--deps/v8/test/mjsunit/regress/regress-404981.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-408036.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-409533.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-387627.js)6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-410912.js206
-rw-r--r--deps/v8/test/mjsunit/regress/regress-411210.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-411237.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-412162.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-416416.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-416730.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-conditional-position.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-107996.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-171715.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-222893.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-320922.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-323936.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-357052.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-403409.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-405491.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-405517.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-405922.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-407946.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412203.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412208.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412210.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412215.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412319.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-416558.js115
-rw-r--r--deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-force-constant-representation.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-constant-load.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-json-parse-index.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-reset-dictionary-elements.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-sliced-external-cons-regexp.js21
-rw-r--r--deps/v8/test/mjsunit/regress/string-compare-memcmp.js7
-rw-r--r--deps/v8/test/mjsunit/regress/string-set-char-deopt.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/apply.js9
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybuffergetbytelength.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybufferinitialize.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybufferisview.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybufferneuter.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybuffersliceimpl.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/arrayconcat.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/availablelocalesof.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/basicjsonstringify.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/booleanize.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/boundfunctiongetbindings.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/break.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/breakiteratoradopttext.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/breakiteratorbreaktype.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/breakiteratorcurrent.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/breakiteratorfirst.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/breakiteratornext.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/canonicalizelanguagetag.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/changebreakonexception.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/charfromcode.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/checkexecutionstate.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/checkisbootstrapping.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/clearbreakpoint.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/clearstepping.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/collectstacktrace.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/compilestring.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/constructdouble.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createbreakiterator.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createcollator.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createglobalprivatesymbol.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createjsfunctionproxy.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createjsproxy.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createprivateownsymbol.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createprivatesymbol.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/createsymbol.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetbuffer.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat32.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat64.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetint16.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetint32.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetint8.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetuint16.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetuint32.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewgetuint8.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewinitialize.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat32.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat64.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetint16.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetint32.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetint8.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetuint16.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetuint32.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dataviewsetuint8.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/datecacheversion.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/datecurrenttime.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/datelocaltimezone.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/datemakeday.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/dateparsestring.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/datesetvalue.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/datetoutc.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugasynctaskevent.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugbreak.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugconstructedby.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugdisassembleconstructor.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugdisassemblefunction.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugevaluate.js12
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugevaluateglobal.js10
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debuggetproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debuggetpropertydetails.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debuggetprototype.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpoppromise.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugprintscopes.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpromiseevent.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpromiserejectevent.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugpushpromise.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugreferencedby.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/debugtrace.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/defineaccessorpropertyunchecked.js9
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/defineapiaccessorproperty.js9
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/definedatapropertyunchecked.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/deleteproperty.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/deoptimizefunction.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/doublehi.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/doublelo.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/enqueuemicrotask.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/estimatenumberofelements.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/executeindebugcontext.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/finisharrayprototypesetup.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/fix.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/flattenstring.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionbindarguments.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functiongetinferredname.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functiongetname.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functiongetscript.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functiongetsourcecode.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionisapifunction.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionisarrow.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionisbuiltin.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionisgenerator.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionremoveprototype.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionsetinstanceclassname.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionsetlength.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionsetname.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/functionsetprototype.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getallscopesdetails.js10
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getargumentsproperty.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getarraykeys.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getbreaklocations.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getcalltrap.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getconstructordelegate.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getconstructtrap.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getdataproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getdefaulticulocale.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getdefaultreceiver.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getframecount.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getframedetails.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getfunctiondelegate.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getfunctionscopecount.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getfunctionscopedetails.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/gethandler.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getheapusage.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getinterceptorinfo.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getlanguagetagvariants.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getobservationstate.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getoptimizationcount.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getownelementnames.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getownproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getownpropertynames.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getpropertynames.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getpropertynamesfast.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getprototype.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getrootnan.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getscopecount.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getscopedetails.js10
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getscript.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getstepinpositions.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/gettemplatefield.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getthreadcount.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getthreaddetails.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getv8version.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getweakmapentries.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/getweaksetvalues.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/globalprint.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/globalproxy.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/haselement.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/hasownproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/hasproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/havesamemap.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/internalcompare.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/internaldateformat.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/internaldateparse.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/internalnumberformat.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/internalnumberparse.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/internalsetprototype.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isattachedglobal.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isbreakonexception.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isextensible.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isinitializedintlobject.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isinprototypechain.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isjsfunctionproxy.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isjsglobalproxy.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isjsmodule.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isjsproxy.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isobserved.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isoptimized.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/ispropertyenumerable.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/issloppymodefunction.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/istemplate.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/isvalidsmi.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/keyedgetproperty.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/liveeditcomparestrings.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/loadfromsuper.js (renamed from deps/v8/test/mjsunit/runtime-gen/lookupaccessor.js)4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/loadmutabledouble.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapclear.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapdelete.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapget.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapgetsize.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/maphas.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapinitialize.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapiteratorinitialize.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapiteratornext.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mapset.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathacos.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathasin.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathatan.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathatan2.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathexprt.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathfloorrt.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathfround.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathlogrt.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/mathsqrtrt.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/maxsmi.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/movearraycontents.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/neveroptimizefunction.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/newarguments.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/newobjectfrombound.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/newstring.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/newstringwrapper.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/newsymbolwrapper.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/notifycontextdisposed.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberadd.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberand.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbercompare.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberdiv.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberequals.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberimul.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbermod.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbermul.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberor.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbersar.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbershl.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbershr.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbersub.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertoexponential.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertofixed.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertointeger.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertointegermapminuszero.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertojsint32.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertojsuint32.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertoprecision.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertoradixstring.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numbertostringrt.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberunaryminus.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/numberxor.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/objectfreeze.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/observationweakmapcreate.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/ownkeys.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/parsejson.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/preventextensions.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/pushifabsent.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/quotejsonstring.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/regexpcompile.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/regexpconstructresult.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/regexpexecmultiple.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/regexpexecrt.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/regexpinitializeobject.js9
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/removearrayholes.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/rempio2.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/roundnumber.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/runmicrotasks.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/runninginsimulator.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setadd.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setclear.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setcode.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setdebugeventlistener.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setdelete.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setdisablebreak.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setflags.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setfunctionbreakpoint.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setgetsize.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/sethas.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setinitialize.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setisobserved.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setiteratorinitialize.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setiteratornext.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setprototype.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/setscopevariablevalue.js10
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/smilexicographiccompare.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/sparsejoinwithseparator.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/specialarrayfunctions.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringbuilderconcat.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringbuilderjoin.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringcharcodeatrt.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringequals.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringindexof.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringlastindexof.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringlocalecompare.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringmatch.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringnormalize.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringparsefloat.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringparseint.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringsplit.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringtoarray.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringtolowercase.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringtonumber.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringtouppercase.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/stringtrim.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/symboldescription.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/symbolisprivate.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/symbolregistry.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/tobool.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/tofastproperties.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/traceenter.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/traceexit.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/truncatestring.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/trymigrateinstance.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typedarraygetbuffer.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typedarraygetlength.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typedarrayinitialize.js9
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js8
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js4
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typedarraysetfastcases.js7
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/typeof.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/uriescape.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/uriunescape.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/weakcollectiondelete.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/weakcollectionget.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/weakcollectionhas.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/weakcollectioninitialize.js5
-rw-r--r--deps/v8/test/mjsunit/runtime-gen/weakcollectionset.js7
-rw-r--r--deps/v8/test/mjsunit/serialize-ic.js9
-rw-r--r--deps/v8/test/mjsunit/string-external-cached.js14
-rw-r--r--deps/v8/test/mjsunit/string-externalize.js28
-rw-r--r--deps/v8/test/mjsunit/string-match.js1
-rw-r--r--deps/v8/test/mjsunit/string-natives.js14
-rw-r--r--deps/v8/test/mjsunit/string-oom-concat.js1
-rw-r--r--deps/v8/test/mjsunit/string-slices.js4
-rw-r--r--deps/v8/test/mozilla/mozilla.status2
-rw-r--r--deps/v8/test/perf-test/Collections/Collections.json15
-rw-r--r--deps/v8/test/perf-test/Collections/base.js367
-rw-r--r--deps/v8/test/perf-test/Collections/map.js81
-rw-r--r--deps/v8/test/perf-test/Collections/run.js30
-rw-r--r--deps/v8/test/perf-test/Collections/set.js66
-rw-r--r--deps/v8/test/perf-test/Collections/weakmap.js80
-rw-r--r--deps/v8/test/perf-test/Collections/weakset.js64
-rw-r--r--deps/v8/test/preparser/duplicate-property.pyt162
-rw-r--r--deps/v8/test/preparser/preparser.status4
-rw-r--r--deps/v8/test/promises-aplus/promises-aplus.status1
-rw-r--r--deps/v8/test/test262-es6/README18
-rw-r--r--deps/v8/test/test262-es6/harness-adapt.js91
-rw-r--r--deps/v8/test/test262-es6/test262-es6.status166
-rw-r--r--deps/v8/test/test262-es6/testcfg.py164
-rw-r--r--deps/v8/test/test262/test262.status3
-rw-r--r--deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt2
-rw-r--r--deps/v8/test/webkit/webkit.status2
-rw-r--r--deps/v8/testing/gmock-support.h72
-rw-r--r--deps/v8/testing/gmock.gyp9
-rw-r--r--deps/v8/testing/gtest-support.h58
-rw-r--r--deps/v8/testing/gtest-type-names.h34
-rw-r--r--deps/v8/testing/gtest.gyp2
-rw-r--r--deps/v8/third_party/fdlibm/LICENSE2
-rw-r--r--deps/v8/third_party/fdlibm/fdlibm.cc16
-rw-r--r--deps/v8/third_party/fdlibm/fdlibm.h2
-rw-r--r--deps/v8/third_party/fdlibm/fdlibm.js318
-rwxr-xr-xdeps/v8/tools/check-name-clashes.py119
-rwxr-xr-xdeps/v8/tools/cpu.sh62
-rw-r--r--deps/v8/tools/detect-builtins.js51
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py30
-rwxr-xr-xdeps/v8/tools/generate-builtins-tests.py158
-rwxr-xr-xdeps/v8/tools/generate-runtime-tests.py1412
-rwxr-xr-xdeps/v8/tools/grokdump.py20
-rw-r--r--deps/v8/tools/gyp/v8.gyp248
-rw-r--r--deps/v8/tools/lexer-shell.cc5
-rw-r--r--deps/v8/tools/parser-shell.cc5
-rwxr-xr-xdeps/v8/tools/presubmit.py12
-rwxr-xr-xdeps/v8/tools/push-to-trunk/auto_push.py24
-rwxr-xr-xdeps/v8/tools/push-to-trunk/auto_roll.py46
-rwxr-xr-xdeps/v8/tools/push-to-trunk/auto_tag.py44
-rwxr-xr-xdeps/v8/tools/push-to-trunk/bump_up_version.py59
-rwxr-xr-xdeps/v8/tools/push-to-trunk/chromium_roll.py128
-rw-r--r--deps/v8/tools/push-to-trunk/common_includes.py292
-rw-r--r--deps/v8/tools/push-to-trunk/git_recipes.py217
-rwxr-xr-xdeps/v8/tools/push-to-trunk/merge_to_branch.py100
-rwxr-xr-xdeps/v8/tools/push-to-trunk/push_to_trunk.py144
-rwxr-xr-xdeps/v8/tools/push-to-trunk/releases.py153
-rw-r--r--deps/v8/tools/push-to-trunk/test_scripts.py1254
-rwxr-xr-xdeps/v8/tools/run-tests.py62
-rwxr-xr-xdeps/v8/tools/run_perf.py (renamed from deps/v8/tools/run_benchmarks.py)144
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py45
-rw-r--r--deps/v8/tools/testrunner/local/utils.py4
-rw-r--r--deps/v8/tools/unittests/run_perf_test.py (renamed from deps/v8/tools/unittests/run_benchmarks_test.py)127
-rw-r--r--deps/v8/tools/v8heapconst.py38
-rw-r--r--deps/v8/tools/whitespace.txt4
1288 files changed, 83066 insertions, 65243 deletions
diff --git a/deps/v8/.DEPS.git b/deps/v8/.DEPS.git
index 7775744953..8f9da45bb6 100644
--- a/deps/v8/.DEPS.git
+++ b/deps/v8/.DEPS.git
@@ -24,6 +24,11 @@ deps = {
}
deps_os = {
+ 'android':
+ {
+ 'v8/third_party/android_tools':
+ Var('git_url') + '/android_tools.git@31869996507de16812bb53a3d0aaa15cd6194c16',
+ },
'win':
{
'v8/third_party/cygwin':
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index d0d4b436df..22f4e1c3b4 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -80,3 +80,5 @@ GRTAGS
GSYMS
GPATH
gtags.files
+turbo*.dot
+turbo*.json
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 7ac0815699..f18761e4ba 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -56,6 +56,7 @@ Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
Rajeev R Krithivasan <rkrithiv@codeaurora.org>
+Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index efa4b717c9..bfe44395ff 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -25,7 +25,7 @@ v8_random_seed = "314159265"
# Configurations
#
config("internal_config") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [ "." ]
@@ -38,7 +38,7 @@ config("internal_config") {
}
config("internal_config_base") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [ "." ]
}
@@ -56,7 +56,7 @@ config("external_config") {
}
config("features") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = []
@@ -118,7 +118,7 @@ config("features") {
}
config("toolchain") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = []
cflags = []
@@ -167,7 +167,7 @@ config("toolchain") {
#
action("js2c") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py"
@@ -184,24 +184,25 @@ action("js2c") {
"src/uri.js",
"third_party/fdlibm/fdlibm.js",
"src/math.js",
- "src/messages.js",
"src/apinatives.js",
- "src/debug-debugger.js",
- "src/mirror-debugger.js",
- "src/liveedit-debugger.js",
"src/date.js",
- "src/json.js",
"src/regexp.js",
"src/arraybuffer.js",
"src/typedarray.js",
+ "src/generator.js",
+ "src/object-observe.js",
"src/collection.js",
+ "src/weak-collection.js",
"src/collection-iterator.js",
- "src/weak_collection.js",
"src/promise.js",
- "src/object-observe.js",
- "src/macros.py",
+ "src/messages.js",
+ "src/json.js",
"src/array-iterator.js",
"src/string-iterator.js",
+ "src/debug-debugger.js",
+ "src/mirror-debugger.js",
+ "src/liveedit-debugger.js",
+ "src/macros.py",
]
outputs = [
@@ -228,7 +229,7 @@ action("js2c") {
}
action("js2c_experimental") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py"
@@ -242,6 +243,7 @@ action("js2c_experimental") {
"src/generator.js",
"src/harmony-string.js",
"src/harmony-array.js",
+ "src/harmony-classes.js",
]
outputs = [
@@ -265,7 +267,7 @@ action("js2c_experimental") {
if (v8_use_external_startup_data) {
action("natives_blob") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":js2c",
@@ -288,7 +290,7 @@ if (v8_use_external_startup_data) {
}
action("postmortem-metadata") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/gen-postmortem-metadata.py"
@@ -307,7 +309,7 @@ action("postmortem-metadata") {
}
action("run_mksnapshot") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [ ":mksnapshot($host_toolchain)" ]
@@ -345,7 +347,7 @@ action("run_mksnapshot") {
#
source_set("v8_nosnapshot") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":js2c",
@@ -366,7 +368,7 @@ source_set("v8_nosnapshot") {
}
source_set("v8_snapshot") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":js2c",
@@ -389,7 +391,7 @@ source_set("v8_snapshot") {
if (v8_use_external_startup_data) {
source_set("v8_external_snapshot") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":js2c",
@@ -411,7 +413,7 @@ if (v8_use_external_startup_data) {
}
source_set("v8_base") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/accessors.cc",
@@ -434,6 +436,12 @@ source_set("v8_base") {
"src/ast-value-factory.h",
"src/ast.cc",
"src/ast.h",
+ "src/background-parsing-task.cc",
+ "src/background-parsing-task.h",
+ "src/bailout-reason.cc",
+ "src/bailout-reason.h",
+ "src/basic-block-profiler.cc",
+ "src/basic-block-profiler.h",
"src/bignum-dtoa.cc",
"src/bignum-dtoa.h",
"src/bignum.cc",
@@ -451,6 +459,8 @@ source_set("v8_base") {
"src/checks.h",
"src/circular-queue-inl.h",
"src/circular-queue.h",
+ "src/code-factory.cc",
+ "src/code-factory.h",
"src/code-stubs.cc",
"src/code-stubs.h",
"src/code-stubs-hydrogen.cc",
@@ -459,12 +469,19 @@ source_set("v8_base") {
"src/codegen.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
+ "src/compiler/access-builder.cc",
+ "src/compiler/access-builder.h",
"src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h",
+ "src/compiler/basic-block-instrumentor.cc",
+ "src/compiler/basic-block-instrumentor.h",
+ "src/compiler/change-lowering.cc",
+ "src/compiler/change-lowering.h",
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
"src/compiler/common-node-cache.h",
+ "src/compiler/common-operator.cc",
"src/compiler/common-operator.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
@@ -493,24 +510,28 @@ source_set("v8_base") {
"src/compiler/instruction-selector.h",
"src/compiler/instruction.cc",
"src/compiler/instruction.h",
+ "src/compiler/js-builtin-reducer.cc",
+ "src/compiler/js-builtin-reducer.h",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h",
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
+ "src/compiler/js-inlining.cc",
+ "src/compiler/js-inlining.h",
"src/compiler/js-operator.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
- "src/compiler/lowering-builder.cc",
- "src/compiler/lowering-builder.h",
- "src/compiler/machine-node-factory.h",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
+ "src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
+ "src/compiler/machine-type.cc",
+ "src/compiler/machine-type.h",
"src/compiler/node-aux-data-inl.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
@@ -523,6 +544,7 @@ source_set("v8_base") {
"src/compiler/opcodes.h",
"src/compiler/operator-properties-inl.h",
"src/compiler/operator-properties.h",
+ "src/compiler/operator.cc",
"src/compiler/operator.h",
"src/compiler/phi-reducer.h",
"src/compiler/pipeline.cc",
@@ -538,14 +560,16 @@ source_set("v8_base") {
"src/compiler/scheduler.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
- "src/compiler/simplified-node-factory.h",
+ "src/compiler/simplified-operator-reducer.cc",
+ "src/compiler/simplified-operator-reducer.h",
+ "src/compiler/simplified-operator.cc",
"src/compiler/simplified-operator.h",
"src/compiler/source-position.cc",
"src/compiler/source-position.h",
- "src/compiler/structured-machine-assembler.cc",
- "src/compiler/structured-machine-assembler.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
+ "src/compiler/value-numbering-reducer.cc",
+ "src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
"src/compiler.cc",
@@ -601,7 +625,6 @@ source_set("v8_base") {
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
"src/feedback-slots.h",
- "src/field-index.cc",
"src/field-index.h",
"src/field-index-inl.h",
"src/fixed-dtoa.cc",
@@ -630,6 +653,8 @@ source_set("v8_base") {
"src/heap-snapshot-generator-inl.h",
"src/heap-snapshot-generator.cc",
"src/heap-snapshot-generator.h",
+ "src/heap/gc-idle-time-handler.cc",
+ "src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
"src/heap/gc-tracer.h",
"src/heap/heap-inl.h",
@@ -707,11 +732,25 @@ source_set("v8_base") {
"src/i18n.h",
"src/icu_util.cc",
"src/icu_util.h",
- "src/ic-inl.h",
- "src/ic.cc",
- "src/ic.h",
+ "src/ic/access-compiler.cc",
+ "src/ic/access-compiler.h",
+ "src/ic/call-optimization.cc",
+ "src/ic/call-optimization.h",
+ "src/ic/handler-compiler.cc",
+ "src/ic/handler-compiler.h",
+ "src/ic/ic-inl.h",
+ "src/ic/ic-state.cc",
+ "src/ic/ic-state.h",
+ "src/ic/ic.cc",
+ "src/ic/ic.h",
+ "src/ic/ic-compiler.cc",
+ "src/ic/ic-compiler.h",
+ "src/ic/stub-cache.cc",
+ "src/ic/stub-cache.h",
"src/interface.cc",
"src/interface.h",
+ "src/interface-descriptors.cc",
+ "src/interface-descriptors.h",
"src/interpreter-irregexp.cc",
"src/interpreter-irregexp.h",
"src/isolate.cc",
@@ -785,8 +824,21 @@ source_set("v8_base") {
"src/rewriter.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
- "src/runtime.cc",
- "src/runtime.h",
+ "src/runtime/runtime-collections.cc",
+ "src/runtime/runtime-compiler.cc",
+ "src/runtime/runtime-i18n.cc",
+ "src/runtime/runtime-json.cc",
+ "src/runtime/runtime-maths.cc",
+ "src/runtime/runtime-numbers.cc",
+ "src/runtime/runtime-regexp.cc",
+ "src/runtime/runtime-strings.cc",
+ "src/runtime/runtime-test.cc",
+ "src/runtime/runtime-typedarray.cc",
+ "src/runtime/runtime-uri.cc",
+ "src/runtime/runtime-utils.h",
+ "src/runtime/runtime.cc",
+ "src/runtime/runtime.h",
+ "src/runtime/string-builder.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
"src/sampler.cc",
@@ -812,13 +864,14 @@ source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
- "src/stub-cache.cc",
- "src/stub-cache.h",
"src/token.cc",
"src/token.h",
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
+ "src/type-feedback-vector-inl.h",
+ "src/type-feedback-vector.cc",
+ "src/type-feedback-vector.h",
"src/type-info.cc",
"src/type-info.h",
"src/types-inl.h",
@@ -871,7 +924,7 @@ source_set("v8_base") {
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
"src/ia32/full-codegen-ia32.cc",
- "src/ia32/ic-ia32.cc",
+ "src/ia32/interface-descriptors-ia32.cc",
"src/ia32/lithium-codegen-ia32.cc",
"src/ia32/lithium-codegen-ia32.h",
"src/ia32/lithium-gap-resolver-ia32.cc",
@@ -882,11 +935,13 @@ source_set("v8_base") {
"src/ia32/macro-assembler-ia32.h",
"src/ia32/regexp-macro-assembler-ia32.cc",
"src/ia32/regexp-macro-assembler-ia32.h",
- "src/ia32/stub-cache-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/compiler/ia32/linkage-ia32.cc",
+ "src/ic/ia32/ic-ia32.cc",
+ "src/ic/ia32/ic-compiler-ia32.cc",
+ "src/ic/ia32/stub-cache-ia32.cc",
]
} else if (v8_target_arch == "x64") {
sources += [
@@ -905,7 +960,7 @@ source_set("v8_base") {
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/full-codegen-x64.cc",
- "src/x64/ic-x64.cc",
+ "src/x64/interface-descriptors-x64.cc",
"src/x64/lithium-codegen-x64.cc",
"src/x64/lithium-codegen-x64.h",
"src/x64/lithium-gap-resolver-x64.cc",
@@ -916,11 +971,15 @@ source_set("v8_base") {
"src/x64/macro-assembler-x64.h",
"src/x64/regexp-macro-assembler-x64.cc",
"src/x64/regexp-macro-assembler-x64.h",
- "src/x64/stub-cache-x64.cc",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/linkage-x64.cc",
+ "src/ic/x64/access-compiler-x64.cc",
+ "src/ic/x64/handler-compiler-x64.cc",
+ "src/ic/x64/ic-x64.cc",
+ "src/ic/x64/ic-compiler-x64.cc",
+ "src/ic/x64/stub-cache-x64.cc",
]
} else if (v8_target_arch == "arm") {
sources += [
@@ -941,7 +1000,8 @@ source_set("v8_base") {
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
"src/arm/full-codegen-arm.cc",
- "src/arm/ic-arm.cc",
+ "src/arm/interface-descriptors-arm.cc",
+ "src/arm/interface-descriptors-arm.h",
"src/arm/lithium-arm.cc",
"src/arm/lithium-arm.h",
"src/arm/lithium-codegen-arm.cc",
@@ -953,11 +1013,15 @@ source_set("v8_base") {
"src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
- "src/arm/stub-cache-arm.cc",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/linkage-arm.cc",
+ "src/ic/arm/access-compiler-arm.cc",
+ "src/ic/arm/handler-compiler-arm.cc",
+ "src/ic/arm/ic-arm.cc",
+ "src/ic/arm/ic-compiler-arm.cc",
+ "src/ic/arm/stub-cache-arm.cc",
]
} else if (v8_target_arch == "arm64") {
sources += [
@@ -981,11 +1045,12 @@ source_set("v8_base") {
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
"src/arm64/full-codegen-arm64.cc",
- "src/arm64/ic-arm64.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
"src/arm64/instrument-arm64.h",
+ "src/arm64/interface-descriptors-arm64.cc",
+ "src/arm64/interface-descriptors-arm64.h",
"src/arm64/lithium-arm64.cc",
"src/arm64/lithium-arm64.h",
"src/arm64/lithium-codegen-arm64.cc",
@@ -999,13 +1064,17 @@ source_set("v8_base") {
"src/arm64/regexp-macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
- "src/arm64/stub-cache-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/linkage-arm64.cc",
+ "src/ic/arm64/access-compiler-arm64.cc",
+ "src/ic/arm64/handler-compiler-arm64.cc",
+ "src/ic/arm64/ic-arm64.cc",
+ "src/ic/arm64/ic-compiler-arm64.cc",
+ "src/ic/arm64/stub-cache-arm64.cc",
]
} else if (v8_target_arch == "mipsel") {
sources += [
@@ -1026,7 +1095,7 @@ source_set("v8_base") {
"src/mips/frames-mips.cc",
"src/mips/frames-mips.h",
"src/mips/full-codegen-mips.cc",
- "src/mips/ic-mips.cc",
+ "src/mips/interface-descriptors-mips.cc",
"src/mips/lithium-codegen-mips.cc",
"src/mips/lithium-codegen-mips.h",
"src/mips/lithium-gap-resolver-mips.cc",
@@ -1038,7 +1107,48 @@ source_set("v8_base") {
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
- "src/mips/stub-cache-mips.cc",
+ "src/ic/mips/access-compiler-mips.cc",
+ "src/ic/mips/handler-compiler-mips.cc",
+ "src/ic/mips/ic-mips.cc",
+ "src/ic/mips/ic-compiler-mips.cc",
+ "src/ic/mips/stub-cache-mips.cc",
+ ]
+ } else if (v8_target_arch == "mips64el") {
+ sources += [
+ "src/mips64/assembler-mips64.cc",
+ "src/mips64/assembler-mips64.h",
+ "src/mips64/assembler-mips64-inl.h",
+ "src/mips64/builtins-mips64.cc",
+ "src/mips64/codegen-mips64.cc",
+ "src/mips64/codegen-mips64.h",
+ "src/mips64/code-stubs-mips64.cc",
+ "src/mips64/code-stubs-mips64.h",
+ "src/mips64/constants-mips64.cc",
+ "src/mips64/constants-mips64.h",
+ "src/mips64/cpu-mips64.cc",
+ "src/mips64/debug-mips64.cc",
+ "src/mips64/deoptimizer-mips64.cc",
+ "src/mips64/disasm-mips64.cc",
+ "src/mips64/frames-mips64.cc",
+ "src/mips64/frames-mips64.h",
+ "src/mips64/full-codegen-mips64.cc",
+ "src/mips64/interface-descriptors-mips64.cc",
+ "src/mips64/lithium-codegen-mips64.cc",
+ "src/mips64/lithium-codegen-mips64.h",
+ "src/mips64/lithium-gap-resolver-mips64.cc",
+ "src/mips64/lithium-gap-resolver-mips64.h",
+ "src/mips64/lithium-mips64.cc",
+ "src/mips64/lithium-mips64.h",
+ "src/mips64/macro-assembler-mips64.cc",
+ "src/mips64/macro-assembler-mips64.h",
+ "src/mips64/regexp-macro-assembler-mips64.cc",
+ "src/mips64/regexp-macro-assembler-mips64.h",
+ "src/mips64/simulator-mips64.cc",
+ "src/ic/mips64/access-compiler-mips64.cc",
+ "src/ic/mips64/handler-compiler-mips64.cc",
+ "src/ic/mips64/ic-mips64.cc",
+ "src/ic/mips64/ic-compiler-mips64.cc",
+ "src/ic/mips64/stub-cache-mips64.cc",
]
}
@@ -1046,9 +1156,18 @@ source_set("v8_base") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
defines = []
deps = [ ":v8_libbase" ]
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ]
+ }
if (is_linux) {
if (v8_compress_startup_data == "bz2") {
libs += [ "bz2" ]
@@ -1076,7 +1195,7 @@ source_set("v8_base") {
}
source_set("v8_libbase") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/base/atomicops.h",
@@ -1089,9 +1208,14 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
"src/base/atomicops_internals_x86_msvc.h",
+ "src/base/bits.cc",
+ "src/base/bits.h",
"src/base/build_config.h",
"src/base/cpu.cc",
"src/base/cpu.h",
+ "src/base/division-by-constant.cc",
+ "src/base/division-by-constant.h",
+ "src/base/flags.h",
"src/base/lazy-instance.h",
"src/base/logging.cc",
"src/base/logging.h",
@@ -1112,6 +1236,8 @@ source_set("v8_libbase") {
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
+ "src/base/sys-info.cc",
+ "src/base/sys-info.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
@@ -1120,6 +1246,11 @@ source_set("v8_libbase") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
defines = []
if (is_posix) {
@@ -1183,6 +1314,11 @@ source_set("v8_libplatform") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:optimize" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
deps = [
":v8_libbase",
]
@@ -1194,7 +1330,7 @@ source_set("v8_libplatform") {
if (current_toolchain == host_toolchain) {
executable("mksnapshot") {
- visibility = ":*" # Only targets in this file can depend on this.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/mksnapshot.cc",
@@ -1250,6 +1386,7 @@ component("v8") {
direct_dependent_configs = [ ":external_config" ]
+ libs = []
if (is_android && current_toolchain != host_toolchain) {
libs += [ "log" ]
}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 0b2872a7c2..89e5b9f89f 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,4 +1,258 @@
-2014-08-13: Version 3.28.73
+2014-09-30: Version 3.29.93
+
+ Add a getter for the address and size of the code range to the pulic API
+ (issue 3598).
+
+ Convert `obj` ToObject in Object.keys() (issue 3587).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-29: Version 3.29.92
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-26: Version 3.29.91
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-25: Version 3.29.88
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-24: Version 3.29.87
+
+ Preserve message when rethrowing exception (issue 3583).
+
+ Fix escaped index JSON parsing (Chromium issue 416449).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-23: Version 3.29.84
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-23: Version 3.29.83
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-23: Version 3.29.82
+
+ Fix escaped index JSON parsing (Chromium issue 416449).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-17: Version 3.29.70
+
+ Enable ES6 generators (issue 2355).
+
+ Fixed int vs. uintptr_t confusion (plus some cleanup on the way) (issue
+ 3556).
+
+ Move configuration of ResourceConstraints to Isolate construction.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-16: Version 3.29.66
+
+ Currently, a new isolate is created in an uninitialized state, and
+ several API methods will automatically initialize it. During this
+ uninitialized state, code event handlers and function entry handlers can
+ be attached to the isolate.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-15: Version 3.29.64
+
+ ES6: String(symbol) should work like symbol.toString (issue 3554).
+
+ Arrow functions: Cleanup handling of the prototype property (issue
+ 2700).
+
+ Remove V8_HOST_CAN_READ_UNALIGNED and its uses (Chromium issue 412967).
+
+ Fix Smi vs. HeapObject confusion in HConstants (Chromium issue 412215).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-12: Version 3.29.59
+
+ Do not use wide reads in CopyCharsUnsigned (Chromium issue 412967).
+
+ Fix inaccurate type condition in Hydrogen (Chromium issue 412210).
+
+ Fix crash in ScriptDebugServer::wrapCallFrames (Chromium issue 411196).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-11: Version 3.29.57
+
+ ES6: Add support for method shorthand in object literals (issue 3516).
+
+ Unbreak FreeBSD build (hopefully) (issue 3548).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-09: Version 3.29.53
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-08: Version 3.29.50
+
+ Allocate a new empty number dictionary when resetting elements (Chromium
+ issue 410332).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-05: Version 3.29.43
+
+ Enforce correct number comparisons when inlining Array.indexOf (Chromium
+ issue 407946).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-04: Version 3.29.41
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-03: Version 3.29.40
+
+ Use correct receiver for DOM accessors on the prototype chain (issue
+ 3538).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-02: Version 3.29.38
+
+ Do not clear weak monomorphic IC after context disposal (Chromium issue
+ 404020).
+
+ Turn on job-based sweeping (issue 3104).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-09-01: Version 3.29.35
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-29: Version 3.29.29
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.27
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.25
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.24
+
+ Tweaks to generate XP-compatible .exes (Chromium issue 407517).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.23
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-27: Version 3.29.20
+
+ Handle empty allocation list in CodeRange properly (issue 3540, Chromium
+ issue 407566).
+
+ Fixed inlining of constant values (issue 3529).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-25: Version 3.29.17
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-24: Version 3.29.16
+
+ Fix issue with numeric property names (issue 3507).
+
+ Add back the duplicate property checker (issue 3498).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-22: Version 3.29.14
+
+ Don't inline Array.shift() if receiver map is not extensible (Chromium
+ issue 405517).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-21: Version 3.29.11
+
+ Refactor ParseObjectLiteral.
+
+ Support symbol-named properties in API (issue 3394).
+
+ Suppress test262 test that tests duplicate properties.
+
+ ES6: Duplicate properties are no longer an error (issue 3498).
+
+ Expose function CheckDebugBreak in the debugger api.
+
+ Remove RegExp.$input (issue 3486).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-21: Version 3.29.10
+
+ ES6: Make sure we do not store -0 as the key in Map/Set (issue 3515).
+
+ Remove removed flags from tests.
+
+ Expose well-known Symbols to C++ API (Chromium issue 341423).
+
+ Implement ES6 Array.of() (issue 3427).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-08-20: Version 3.29.9
+
+ Correctly handle holes when concat()ing double arrays (Chromium issue
+ 403409).
+
+ [turbofan] Refactor the InstructionSelector tests (issue 3489).
+
+ ES6: Make Map/Set constructors support iterable values (issue 3508).
+
+ WeakMap/WeakSet: Add test for non object keys (issue 3399).
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 9459204f2c..d4139c6098 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -3,6 +3,8 @@
# all paths in here must match this assumption.
vars = {
+ "chromium_git": "https://chromium.googlesource.com",
+
"chromium_trunk": "https://src.chromium.org/svn/trunk",
"buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b",
@@ -28,6 +30,11 @@ deps = {
}
deps_os = {
+ "android": {
+ "v8/third_party/android_tools":
+ Var("chromium_git") + "/android_tools.git" + "@" +
+ "31869996507de16812bb53a3d0aaa15cd6194c16",
+ },
"win": {
"v8/third_party/cygwin":
Var("chromium_trunk") + "/deps/third_party/cygwin@66844",
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 96d7a7ae4d..2fbe1ba7db 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -230,8 +230,8 @@ NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
- build/toolchain.gypi samples/samples.gyp src/d8.gyp \
- test/cctest/cctest.gyp tools/gyp/v8.gyp
+ build/toolchain.gypi samples/samples.gyp src/compiler/compiler.gyp \
+ src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
index d46af31fdb..8e200f1f24 100644
--- a/deps/v8/Makefile.android
+++ b/deps/v8/Makefile.android
@@ -64,20 +64,20 @@ else
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
- TOOLCHAIN_VER = 4.6
+ TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
- TOOLCHAIN_VER = 4.6
+ TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
- TOOLCHAIN_VER = 4.6
+ TOOLCHAIN_VER = 4.8
else
$(error Target architecture "${ARCH}" is not supported)
endif
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 55bb99ab8a..3a9895db8d 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -34,6 +34,32 @@ for more details about the presubmit API built into gcl.
import sys
+_EXCLUDED_PATHS = (
+ r"^test[\\\/].*",
+ r"^testing[\\\/].*",
+ r"^third_party[\\\/].*",
+ r"^tools[\\\/].*",
+)
+
+
+# Regular expression that matches code only used for test binaries
+# (best effort).
+_TEST_CODE_EXCLUDED_PATHS = (
+ r'.+-unittest\.cc',
+ # Has a method VisitForTest().
+ r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
+ # Test extension.
+ r'src[\\\/]extensions[\\\/]gc-extension\.cc',
+)
+
+
+_TEST_ONLY_WARNING = (
+ 'You might be calling functions intended only for testing from\n'
+ 'production code. It is OK to ignore this warning if you know what\n'
+ 'you are doing, as the heuristics used to detect the situation are\n'
+ 'not perfect. The commit queue will not block on this warning.')
+
+
def _V8PresubmitChecks(input_api, output_api):
"""Runs the V8 presubmit checks."""
import sys
@@ -41,7 +67,7 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
- from presubmit import CheckGeneratedRuntimeTests
+ from presubmit import CheckRuntimeVsNativesNameClashes
from presubmit import CheckExternalReferenceRegistration
results = []
@@ -51,9 +77,9 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
- if not CheckGeneratedRuntimeTests(input_api.PresubmitLocalPath()):
+ if not CheckRuntimeVsNativesNameClashes(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
- "Generated runtime tests check failed"))
+ "Runtime/natives name clash check failed"))
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
@@ -113,6 +139,49 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
+def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
+ """Attempts to prevent use of functions intended only for testing in
+ non-testing code. For now this is just a best-effort implementation
+ that ignores header files and may have some false positives. A
+ better implementation would probably need a proper C++ parser.
+ """
+ # We only scan .cc files, as the declaration of for-testing functions in
+ # header files are hard to distinguish from calls to such functions without a
+ # proper C++ parser.
+ file_inclusion_pattern = r'.+\.cc'
+
+ base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
+ inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
+ comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
+ exclusion_pattern = input_api.re.compile(
+ r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
+ base_function_pattern, base_function_pattern))
+
+ def FilterFile(affected_file):
+ black_list = (_EXCLUDED_PATHS +
+ _TEST_CODE_EXCLUDED_PATHS +
+ input_api.DEFAULT_BLACK_LIST)
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=(file_inclusion_pattern, ),
+ black_list=black_list)
+
+ problems = []
+ for f in input_api.AffectedSourceFiles(FilterFile):
+ local_path = f.LocalPath()
+ for line_number, line in f.ChangedContents():
+ if (inclusion_pattern.search(line) and
+ not comment_pattern.search(line) and
+ not exclusion_pattern.search(line)):
+ problems.append(
+ '%s:%d\n %s' % (local_path, line_number, line.strip()))
+
+ if problems:
+ return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
+ else:
+ return []
+
+
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
@@ -122,6 +191,8 @@ def _CommonChecks(input_api, output_api):
input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
+ results.extend(
+ _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
return results
@@ -180,6 +251,6 @@ def GetPreferredTryMasters(project, change):
'v8_linux_layout_dbg': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
- 'v8_win64_rel': set(['defaulttests']),
+ 'v8_win64_compile_rel': set(['defaulttests']),
},
}
diff --git a/deps/v8/benchmarks/v8.json b/deps/v8/benchmarks/v8.json
index f4210d9d40..03ea9621ac 100644
--- a/deps/v8/benchmarks/v8.json
+++ b/deps/v8/benchmarks/v8.json
@@ -3,7 +3,7 @@
"main": "run.js",
"run_count": 2,
"results_regexp": "^%s: (.+)$",
- "benchmarks": [
+ "tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "Crypto"},
diff --git a/deps/v8/build/all.gyp b/deps/v8/build/all.gyp
index 5e410a3d0f..1e420fad8e 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/build/all.gyp
@@ -9,10 +9,12 @@
'type': 'none',
'dependencies': [
'../samples/samples.gyp:*',
+ '../src/base/base.gyp:base-unittests',
+ '../src/compiler/compiler.gyp:compiler-unittests',
'../src/d8.gyp:d8',
- '../test/base-unittests/base-unittests.gyp:*',
+ '../src/heap/heap.gyp:heap-unittests',
+ '../src/libplatform/libplatform.gyp:libplatform-unittests',
'../test/cctest/cctest.gyp:*',
- '../test/compiler-unittests/compiler-unittests.gyp:*',
],
'conditions': [
['component!="shared_library"', {
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index 46ece08524..f984ea3af6 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -87,7 +87,6 @@
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
- '-U__linux__', # Don't allow toolchain to claim -D__linux__
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
diff --git a/deps/v8/build/get_landmines.py b/deps/v8/build/get_landmines.py
index c6ff8165f9..66a86cbb50 100755
--- a/deps/v8/build/get_landmines.py
+++ b/deps/v8/build/get_landmines.py
@@ -19,6 +19,7 @@ def main():
print 'Landmines test.'
print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.'
+ print 'Activating MSVS 2013 again.'
return 0
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 2ed19f65ea..b09122b538 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -215,9 +215,18 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'target_defaults': {
- 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
- '-Wno-long-long', '-pthread', '-fno-exceptions',
- '-pedantic' ],
+ 'cflags': [
+ '-Wall',
+ '<(werror)',
+ '-W',
+ '-Wno-unused-parameter',
+ '-Wno-long-long',
+ '-pthread',
+ '-fno-exceptions',
+ '-pedantic',
+ # Don't warn about the "struct foo f = {0};" initialization pattern.
+ '-Wno-missing-field-initializers',
+ ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
@@ -234,8 +243,15 @@
# or OS=="netbsd"'
['OS=="qnx"', {
'target_defaults': {
- 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
- '-fno-exceptions' ],
+ 'cflags': [
+ '-Wall',
+ '<(werror)',
+ '-W',
+ '-Wno-unused-parameter',
+ '-fno-exceptions',
+ # Don't warn about the "struct foo f = {0};" initialization pattern.
+ '-Wno-missing-field-initializers',
+ ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'conditions': [
[ 'visibility=="hidden"', {
@@ -263,6 +279,7 @@
'defines': [
'_CRT_SECURE_NO_DEPRECATE',
'_CRT_NONSTDC_NO_DEPRECATE',
+ '_USING_V110_SDK71_',
],
'conditions': [
['component=="static_library"', {
@@ -298,6 +315,7 @@
'AdditionalOptions': ['/ignore:4221'],
},
'VCLinkerTool': {
+ 'MinimumRequiredVersion': '5.01', # XP.
'AdditionalDependencies': [
'ws2_32.lib',
],
@@ -359,6 +377,8 @@
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
+ # Don't warn about the "struct foo f = {0};" initialization pattern.
+ '-Wno-missing-field-initializers',
],
},
'conditions': [
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index 1d47360d2a..7f3b9e52d9 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -58,6 +58,14 @@
# Default arch variant for MIPS.
'mips_arch_variant%': 'r2',
+ # Possible values fp32, fp64, fpxx.
+ # fp32 - 32 32-bit FPU registers are available, doubles are placed in
+ # register pairs.
+ # fp64 - 32 64-bit FPU registers are available.
+ # fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
+ # detection.
+ 'mips_fpu_mode%': 'fp32',
+
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
@@ -83,6 +91,9 @@
# Allow to suppress the array bounds warning (default is no suppression).
'wno_array_bounds%': '',
+ # Link-Time Optimizations
+ 'use_lto%': 0,
+
'variables': {
# This is set when building the Android WebView inside the Android build
# system, using the 'android' gyp backend.
@@ -233,6 +244,15 @@
}],
],
}],
+ # Disable LTO for v8
+ # v8 is optimized for speed, which takes precedence over
+ # size optimization in LTO.
+ ['use_lto==1', {
+ 'cflags!': [
+ '-flto',
+ '-ffat-lto-objects',
+ ],
+ }],
],
}], # _toolset=="target"
],
@@ -272,10 +292,33 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
+ ['mips_fpu_mode=="fp64"', {
+ 'cflags': ['-mfp64'],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'cflags': ['-mfpxx'],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'cflags': ['-mfp32'],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'cflags!': ['-mfp32'],
+ 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'ldflags': [
+ '-mips32r6',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="r1"', {
+ 'cflags!': ['-mfp64'],
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ['mips_arch_variant=="rx"', {
+ 'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
@@ -297,8 +340,34 @@
'__mips_soft_float=1'
],
}],
+ ['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
+ 'conditions': [
+ ['mips_fpu_mode=="fp64"', {
+ 'defines': ['FPU_MODE_FP64',],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': ['FPU_MODE_FPXX',],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': ['FPU_MODE_FP32',],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': ['FPU_MODE_FP32',],
}],
],
}], # v8_target_arch=="mips"
@@ -321,13 +390,37 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
+ ['mips_fpu_mode=="fp64"', {
+ 'cflags': ['-mfp64'],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'cflags': ['-mfpxx'],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'cflags': ['-mfp32'],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'cflags!': ['-mfp32'],
+ 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'ldflags': [
+ '-mips32r6',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="r1"', {
+ 'cflags!': ['-mfp64'],
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ['mips_arch_variant=="rx"', {
+ 'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
- }],
+ }],
['mips_arch_variant=="loongson"', {
+ 'cflags!': ['-mfp64'],
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
@@ -349,11 +442,40 @@
'__mips_soft_float=1'
],
}],
+ ['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
+ 'conditions': [
+ ['mips_fpu_mode=="fp64"', {
+ 'defines': ['FPU_MODE_FP64',],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': ['FPU_MODE_FPXX',],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': ['FPU_MODE_FP32',],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': ['FPU_MODE_FP32',],
}],
['mips_arch_variant=="loongson"', {
- 'defines': ['_MIPS_ARCH_LOONGSON',],
+ 'defines': [
+ '_MIPS_ARCH_LOONGSON',
+ 'FPU_MODE_FP32',
+ ],
}],
],
}], # v8_target_arch=="mipsel"
@@ -499,6 +621,12 @@
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
+ # Enable feedback-directed optimisation when building in android.
+ [ 'android_webview_build == 1', {
+ 'aosp_build_settings': {
+ 'LOCAL_FDO_SUPPORT': 'true',
+ },
+ }],
],
'xcode_settings': {
'ARCHS': [ 'i386' ],
@@ -523,6 +651,12 @@
'cflags': [ '-m64' ],
'ldflags': [ '-m64' ],
}],
+ # Enable feedback-directed optimisation when building in android.
+ [ 'android_webview_build == 1', {
+ 'aosp_build_settings': {
+ 'LOCAL_FDO_SUPPORT': 'true',
+ },
+ }],
]
}],
],
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index e72415952d..6abf4e095b 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -167,6 +167,9 @@ class V8_EXPORT Debug {
// happened yet.
static void CancelDebugBreak(Isolate* isolate);
+ // Check if a debugger break is scheduled in the given isolate.
+ static bool CheckDebugBreak(Isolate* isolate);
+
// Break execution of JavaScript in the given isolate (this method
// can be invoked from a non-VM thread) for further client command
// execution on a VM thread. Client data is then passed in
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index ef0bda63f4..63c67624a1 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -77,6 +77,7 @@ class ImplementationUtilities;
class Int32;
class Integer;
class Isolate;
+class Name;
class Number;
class NumberObject;
class Object;
@@ -129,6 +130,7 @@ class Heap;
class HeapObject;
class Isolate;
class Object;
+struct StreamedSource;
template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
@@ -1087,6 +1089,73 @@ class V8_EXPORT ScriptCompiler {
CachedData* cached_data;
};
+ /**
+ * For streaming incomplete script data to V8. The embedder should implement a
+ * subclass of this class.
+ */
+ class ExternalSourceStream {
+ public:
+ virtual ~ExternalSourceStream() {}
+
+ /**
+ * V8 calls this to request the next chunk of data from the embedder. This
+ * function will be called on a background thread, so it's OK to block and
+ * wait for the data, if the embedder doesn't have data yet. Returns the
+ * length of the data returned. When the data ends, GetMoreData should
+ * return 0. Caller takes ownership of the data.
+ *
+ * When streaming UTF-8 data, V8 handles multi-byte characters split between
+ * two data chunks, but doesn't handle multi-byte characters split between
+ * more than two data chunks. The embedder can avoid this problem by always
+ * returning at least 2 bytes of data.
+ *
+ * If the embedder wants to cancel the streaming, they should make the next
+ * GetMoreData call return 0. V8 will interpret it as end of data (and most
+ * probably, parsing will fail). The streaming task will return as soon as
+ * V8 has parsed the data it received so far.
+ */
+ virtual size_t GetMoreData(const uint8_t** src) = 0;
+ };
+
+
+ /**
+ * Source code which can be streamed into V8 in pieces. It will be parsed
+ * while streaming. It can be compiled after the streaming is complete.
+ * StreamedSource must be kept alive while the streaming task is run (see
+ * ScriptStreamingTask below).
+ */
+ class V8_EXPORT StreamedSource {
+ public:
+ enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
+
+ StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
+ ~StreamedSource();
+
+ // Ownership of the CachedData or its buffers is *not* transferred to the
+ // caller. The CachedData object is alive as long as the StreamedSource
+ // object is alive.
+ const CachedData* GetCachedData() const;
+
+ internal::StreamedSource* impl() const { return impl_; }
+
+ private:
+ // Prevent copying. Not implemented.
+ StreamedSource(const StreamedSource&);
+ StreamedSource& operator=(const StreamedSource&);
+
+ internal::StreamedSource* impl_;
+ };
+
+ /**
+ * A streaming task which the embedder must run on a background thread to
+ * stream scripts into V8. Returned by ScriptCompiler::StartStreamingScript.
+ */
+ class ScriptStreamingTask {
+ public:
+ virtual ~ScriptStreamingTask() {}
+ virtual void Run() = 0;
+ };
+
enum CompileOptions {
kNoCompileOptions = 0,
kProduceParserCache,
@@ -1129,6 +1198,32 @@ class V8_EXPORT ScriptCompiler {
static Local<Script> Compile(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
+
+ /**
+ * Returns a task which streams script data into V8, or NULL if the script
+ * cannot be streamed. The user is responsible for running the task on a
+ * background thread and deleting it. When run, the task starts parsing the
+ * script, and it will request data from the StreamedSource as needed. When
+ * ScriptStreamingTask::Run exits, all data has been streamed and the script
+ * can be compiled (see Compile below).
+ *
+ * This API allows starting the streaming with as little data as possible, and
+ * the remaining data (for example, the ScriptOrigin) is passed to Compile.
+ */
+ static ScriptStreamingTask* StartStreamingScript(
+ Isolate* isolate, StreamedSource* source,
+ CompileOptions options = kNoCompileOptions);
+
+ /**
+ * Compiles a streamed script (bound to current context).
+ *
+ * This can only be called after the streaming has finished
+ * (ScriptStreamingTask has been run). V8 doesn't construct the source string
+ * during streaming, so the embedder needs to pass the full source here.
+ */
+ static Local<Script> Compile(Isolate* isolate, StreamedSource* source,
+ Handle<String> full_source_string,
+ const ScriptOrigin& origin);
};
@@ -1367,6 +1462,12 @@ class V8_EXPORT Value : public Data {
bool IsFalse() const;
/**
+ * Returns true if this value is a symbol or a string.
+ * This is an experimental feature.
+ */
+ bool IsName() const;
+
+ /**
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
@@ -1424,6 +1525,11 @@ class V8_EXPORT Value : public Data {
bool IsDate() const;
/**
+ * Returns true if this value is an Arguments object.
+ */
+ bool IsArgumentsObject() const;
+
+ /**
* Returns true if this value is a Boolean object.
*/
bool IsBooleanObject() const;
@@ -1455,12 +1561,48 @@ class V8_EXPORT Value : public Data {
bool IsRegExp() const;
/**
+ * Returns true if this value is a Generator function.
+ * This is an experimental feature.
+ */
+ bool IsGeneratorFunction() const;
+
+ /**
+ * Returns true if this value is a Generator object (iterator).
+ * This is an experimental feature.
+ */
+ bool IsGeneratorObject() const;
+
+ /**
* Returns true if this value is a Promise.
* This is an experimental feature.
*/
bool IsPromise() const;
/**
+ * Returns true if this value is a Map.
+ * This is an experimental feature.
+ */
+ bool IsMap() const;
+
+ /**
+ * Returns true if this value is a Set.
+ * This is an experimental feature.
+ */
+ bool IsSet() const;
+
+ /**
+ * Returns true if this value is a WeakMap.
+ * This is an experimental feature.
+ */
+ bool IsWeakMap() const;
+
+ /**
+ * Returns true if this value is a WeakSet.
+ * This is an experimental feature.
+ */
+ bool IsWeakSet() const;
+
+ /**
* Returns true if this value is an ArrayBuffer.
* This is an experimental feature.
*/
@@ -1594,14 +1736,25 @@ class V8_EXPORT Boolean : public Primitive {
/**
+ * A superclass for symbols and strings.
+ */
+class V8_EXPORT Name : public Primitive {
+ public:
+ V8_INLINE static Name* Cast(v8::Value* obj);
+ private:
+ static void CheckCast(v8::Value* obj);
+};
+
+
+/**
* A JavaScript string value (ECMA-262, 4.3.17).
*/
-class V8_EXPORT String : public Primitive {
+class V8_EXPORT String : public Name {
public:
enum Encoding {
UNKNOWN_ENCODING = 0x1,
TWO_BYTE_ENCODING = 0x0,
- ASCII_ENCODING = 0x4,
+ ASCII_ENCODING = 0x4, // TODO(yangguo): deprecate this.
ONE_BYTE_ENCODING = 0x4
};
/**
@@ -1657,7 +1810,8 @@ class V8_EXPORT String : public Primitive {
NO_OPTIONS = 0,
HINT_MANY_WRITES_EXPECTED = 1,
NO_NULL_TERMINATION = 2,
- PRESERVE_ASCII_NULL = 4,
+ PRESERVE_ASCII_NULL = 4, // TODO(yangguo): deprecate this.
+ PRESERVE_ONE_BYTE_NULL = 4,
// Used by WriteUtf8 to replace orphan surrogate code units with the
// unicode replacement character. Needs to be set to guarantee valid UTF-8
// output.
@@ -1691,9 +1845,12 @@ class V8_EXPORT String : public Primitive {
bool IsExternal() const;
/**
- * Returns true if the string is both external and ASCII
+ * Returns true if the string is both external and one-byte.
*/
- bool IsExternalAscii() const;
+ bool IsExternalOneByte() const;
+
+ // TODO(yangguo): deprecate this.
+ bool IsExternalAscii() const { return IsExternalOneByte(); }
class V8_EXPORT ExternalStringResourceBase { // NOLINT
public:
@@ -1748,33 +1905,32 @@ class V8_EXPORT String : public Primitive {
};
/**
- * An ExternalAsciiStringResource is a wrapper around an ASCII
+ * An ExternalOneByteStringResource is a wrapper around an one-byte
* string buffer that resides outside V8's heap. Implement an
- * ExternalAsciiStringResource to manage the life cycle of the
+ * ExternalOneByteStringResource to manage the life cycle of the
* underlying buffer. Note that the string data must be immutable
- * and that the data must be strict (7-bit) ASCII, not Latin-1 or
- * UTF-8, which would require special treatment internally in the
- * engine and, in the case of UTF-8, do not allow efficient indexing.
- * Use String::New or convert to 16 bit data for non-ASCII.
+ * and that the data must be Latin-1 and not UTF-8, which would require
+ * special treatment internally in the engine and does not allow efficient
+ * indexing. Use String::New or convert to 16 bit data for non-Latin1.
*/
- class V8_EXPORT ExternalAsciiStringResource
+ class V8_EXPORT ExternalOneByteStringResource
: public ExternalStringResourceBase {
public:
/**
* Override the destructor to manage the life cycle of the underlying
* buffer.
*/
- virtual ~ExternalAsciiStringResource() {}
+ virtual ~ExternalOneByteStringResource() {}
/** The string data from the underlying buffer.*/
virtual const char* data() const = 0;
- /** The number of ASCII characters in the string.*/
+ /** The number of Latin-1 characters in the string.*/
virtual size_t length() const = 0;
protected:
- ExternalAsciiStringResource() {}
+ ExternalOneByteStringResource() {}
};
- typedef ExternalAsciiStringResource ExternalOneByteStringResource;
+ typedef ExternalOneByteStringResource ExternalAsciiStringResource;
/**
* If the string is an external string, return the ExternalStringResourceBase
@@ -1791,10 +1947,15 @@ class V8_EXPORT String : public Primitive {
V8_INLINE ExternalStringResource* GetExternalStringResource() const;
/**
- * Get the ExternalAsciiStringResource for an external ASCII string.
- * Returns NULL if IsExternalAscii() doesn't return true.
+ * Get the ExternalOneByteStringResource for an external one-byte string.
+ * Returns NULL if IsExternalOneByte() doesn't return true.
*/
- const ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
+ const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
+
+ // TODO(yangguo): deprecate this.
+ const ExternalAsciiStringResource* GetExternalAsciiStringResource() const {
+ return GetExternalOneByteStringResource();
+ }
V8_INLINE static String* Cast(v8::Value* obj);
@@ -1851,7 +2012,7 @@ class V8_EXPORT String : public Primitive {
bool MakeExternal(ExternalStringResource* resource);
/**
- * Creates a new external string using the ASCII data defined in the given
+ * Creates a new external string using the one-byte data defined in the given
* resource. When the external string is no longer live on V8's heap the
* resource will be disposed by calling its Dispose method. The caller of
* this function should not otherwise delete or modify the resource. Neither
@@ -1859,7 +2020,7 @@ class V8_EXPORT String : public Primitive {
* destructor of the external string resource.
*/
static Local<String> NewExternal(Isolate* isolate,
- ExternalAsciiStringResource* resource);
+ ExternalOneByteStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1870,7 +2031,7 @@ class V8_EXPORT String : public Primitive {
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
- bool MakeExternal(ExternalAsciiStringResource* resource);
+ bool MakeExternal(ExternalOneByteStringResource* resource);
/**
* Returns true if this string can be made external.
@@ -1935,7 +2096,7 @@ class V8_EXPORT String : public Primitive {
*
* This is an experimental feature. Use at your own risk.
*/
-class V8_EXPORT Symbol : public Primitive {
+class V8_EXPORT Symbol : public Name {
public:
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
@@ -1955,7 +2116,12 @@ class V8_EXPORT Symbol : public Primitive {
// registry that is not accessible by (and cannot clash with) JavaScript code.
static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
+ // Well-known symbols
+ static Local<Symbol> GetIterator(Isolate* isolate);
+ static Local<Symbol> GetUnscopables(Isolate* isolate);
+
V8_INLINE static Symbol* Cast(v8::Value* obj);
+
private:
Symbol();
static void CheckCast(v8::Value* obj);
@@ -2079,12 +2245,19 @@ enum ExternalArrayType {
typedef void (*AccessorGetterCallback)(
Local<String> property,
const PropertyCallbackInfo<Value>& info);
+typedef void (*AccessorNameGetterCallback)(
+ Local<Name> property,
+ const PropertyCallbackInfo<Value>& info);
typedef void (*AccessorSetterCallback)(
Local<String> property,
Local<Value> value,
const PropertyCallbackInfo<void>& info);
+typedef void (*AccessorNameSetterCallback)(
+ Local<Name> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
/**
@@ -2159,14 +2332,20 @@ class V8_EXPORT Object : public Value {
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
+ bool SetAccessor(Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
// This function is not yet stable and should not be used at this time.
- bool SetDeclaredAccessor(Local<String> name,
+ bool SetDeclaredAccessor(Local<Name> name,
Local<DeclaredAccessorDescriptor> descriptor,
PropertyAttribute attribute = None,
AccessControl settings = DEFAULT);
- void SetAccessorProperty(Local<String> name,
+ void SetAccessorProperty(Local<Name> name,
Local<Function> getter,
Handle<Function> setter = Handle<Function>(),
PropertyAttribute attribute = None,
@@ -3168,12 +3347,12 @@ class V8_EXPORT External : public Value {
class V8_EXPORT Template : public Data {
public:
/** Adds a property to each instance created by this template.*/
- void Set(Handle<String> name, Handle<Data> value,
+ void Set(Handle<Name> name, Handle<Data> value,
PropertyAttribute attributes = None);
V8_INLINE void Set(Isolate* isolate, const char* name, Handle<Data> value);
void SetAccessorProperty(
- Local<String> name,
+ Local<Name> name,
Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
PropertyAttribute attribute = None,
@@ -3215,9 +3394,18 @@ class V8_EXPORT Template : public Data {
Local<AccessorSignature> signature =
Local<AccessorSignature>(),
AccessControl settings = DEFAULT);
+ void SetNativeDataProperty(Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ // TODO(dcarney): gcc can't handle Local below
+ Handle<Value> data = Handle<Value>(),
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature =
+ Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT);
// This function is not yet stable and should not be used at this time.
- bool SetDeclaredAccessor(Local<String> name,
+ bool SetDeclaredAccessor(Local<Name> name,
Local<DeclaredAccessorDescriptor> descriptor,
PropertyAttribute attribute = None,
Local<AccessorSignature> signature =
@@ -3584,12 +3772,20 @@ class V8_EXPORT ObjectTemplate : public Template {
PropertyAttribute attribute = None,
Handle<AccessorSignature> signature =
Handle<AccessorSignature>());
+ void SetAccessor(Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
+ Handle<AccessorSignature> signature =
+ Handle<AccessorSignature>());
/**
* Sets a named property handler on the object template.
*
- * Whenever a named property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
+ * Whenever a property whose name is a string is accessed on objects created
+ * from this object template, the provided callback is invoked instead of
* accessing the property directly on the JavaScript object.
*
* \param getter The callback to invoke when getting a property.
@@ -3792,11 +3988,11 @@ class V8_EXPORT TypeSwitch : public Data {
// --- Extensions ---
-class V8_EXPORT ExternalAsciiStringResourceImpl
- : public String::ExternalAsciiStringResource {
+class V8_EXPORT ExternalOneByteStringResourceImpl
+ : public String::ExternalOneByteStringResource {
public:
- ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
- ExternalAsciiStringResourceImpl(const char* data, size_t length)
+ ExternalOneByteStringResourceImpl() : data_(0), length_(0) {}
+ ExternalOneByteStringResourceImpl(const char* data, size_t length)
: data_(data), length_(length) {}
const char* data() const { return data_; }
size_t length() const { return length_; }
@@ -3826,7 +4022,7 @@ class V8_EXPORT Extension { // NOLINT
const char* name() const { return name_; }
size_t source_length() const { return source_length_; }
- const String::ExternalAsciiStringResource* source() const {
+ const String::ExternalOneByteStringResource* source() const {
return &source_; }
int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; }
@@ -3836,7 +4032,7 @@ class V8_EXPORT Extension { // NOLINT
private:
const char* name_;
size_t source_length_; // expected to initialize before source_
- ExternalAsciiStringResourceImpl source_;
+ ExternalOneByteStringResourceImpl source_;
int dep_count_;
const char** deps_;
bool auto_enable_;
@@ -3915,13 +4111,6 @@ class V8_EXPORT ResourceConstraints {
};
-/**
- * Sets the given ResourceConstraints on the given Isolate.
- */
-bool V8_EXPORT SetResourceConstraints(Isolate* isolate,
- ResourceConstraints* constraints);
-
-
// --- Exceptions ---
@@ -4059,18 +4248,150 @@ class V8_EXPORT HeapStatistics {
class RetainedObjectInfo;
+
+/**
+ * FunctionEntryHook is the type of the profile entry hook called at entry to
+ * any generated function when function-level profiling is enabled.
+ *
+ * \param function the address of the function that's being entered.
+ * \param return_addr_location points to a location on stack where the machine
+ * return address resides. This can be used to identify the caller of
+ * \p function, and/or modified to divert execution when \p function exits.
+ *
+ * \note the entry hook must not cause garbage collection.
+ */
+typedef void (*FunctionEntryHook)(uintptr_t function,
+ uintptr_t return_addr_location);
+
+/**
+ * A JIT code event is issued each time code is added, moved or removed.
+ *
+ * \note removal events are not currently issued.
+ */
+struct JitCodeEvent {
+ enum EventType {
+ CODE_ADDED,
+ CODE_MOVED,
+ CODE_REMOVED,
+ CODE_ADD_LINE_POS_INFO,
+ CODE_START_LINE_INFO_RECORDING,
+ CODE_END_LINE_INFO_RECORDING
+ };
+ // Definition of the code position type. The "POSITION" type means the place
+ // in the source code which are of interest when making stack traces to
+ // pin-point the source location of a stack frame as close as possible.
+ // The "STATEMENT_POSITION" means the place at the beginning of each
+ // statement, and is used to indicate possible break locations.
+ enum PositionType { POSITION, STATEMENT_POSITION };
+
+ // Type of event.
+ EventType type;
+ // Start of the instructions.
+ void* code_start;
+ // Size of the instructions.
+ size_t code_len;
+ // Script info for CODE_ADDED event.
+ Handle<UnboundScript> script;
+ // User-defined data for *_LINE_INFO_* event. It's used to hold the source
+ // code line information which is returned from the
+ // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
+ // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
+ void* user_data;
+
+ struct name_t {
+ // Name of the object associated with the code, note that the string is not
+ // zero-terminated.
+ const char* str;
+ // Number of chars in str.
+ size_t len;
+ };
+
+ struct line_info_t {
+ // PC offset
+ size_t offset;
+ // Code postion
+ size_t pos;
+ // The position type.
+ PositionType position_type;
+ };
+
+ union {
+ // Only valid for CODE_ADDED.
+ struct name_t name;
+
+ // Only valid for CODE_ADD_LINE_POS_INFO
+ struct line_info_t line_info;
+
+ // New location of instructions. Only valid for CODE_MOVED.
+ void* new_code_start;
+ };
+};
+
+/**
+ * Option flags passed to the SetJitCodeEventHandler function.
+ */
+enum JitCodeEventOptions {
+ kJitCodeEventDefault = 0,
+ // Generate callbacks for already existent code.
+ kJitCodeEventEnumExisting = 1
+};
+
+
+/**
+ * Callback function passed to SetJitCodeEventHandler.
+ *
+ * \param event code add, move or removal event.
+ */
+typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
+
+
/**
- * Isolate represents an isolated instance of the V8 engine. V8
- * isolates have completely separate states. Objects from one isolate
- * must not be used in other isolates. When V8 is initialized a
- * default isolate is implicitly created and entered. The embedder
- * can create additional isolates and use them in parallel in multiple
- * threads. An isolate can be entered by at most one thread at any
- * given time. The Locker/Unlocker API must be used to synchronize.
+ * Isolate represents an isolated instance of the V8 engine. V8 isolates have
+ * completely separate states. Objects from one isolate must not be used in
+ * other isolates. The embedder can create multiple isolates and use them in
+ * parallel in multiple threads. An isolate can be entered by at most one
+ * thread at any given time. The Locker/Unlocker API must be used to
+ * synchronize.
*/
class V8_EXPORT Isolate {
public:
/**
+ * Initial configuration parameters for a new Isolate.
+ */
+ struct CreateParams {
+ CreateParams()
+ : entry_hook(NULL),
+ code_event_handler(NULL),
+ enable_serializer(false) {}
+
+ /**
+ * The optional entry_hook allows the host application to provide the
+ * address of a function that's invoked on entry to every V8-generated
+ * function. Note that entry_hook is invoked at the very start of each
+ * generated function. Furthermore, if an entry_hook is given, V8 will
+ * always run without a context snapshot.
+ */
+ FunctionEntryHook entry_hook;
+
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ */
+ JitCodeEventHandler code_event_handler;
+
+ /**
+ * ResourceConstraints to use for the new Isolate.
+ */
+ ResourceConstraints constraints;
+
+ /**
+ * This flag currently renders the Isolate unusable.
+ */
+ bool enable_serializer;
+ };
+
+
+ /**
* Stack-allocated class which sets the isolate for all operations
* executed within a local scope.
*/
@@ -4177,8 +4498,10 @@ class V8_EXPORT Isolate {
*
* When an isolate is no longer used its resources should be freed
* by calling Dispose(). Using the delete operator is not allowed.
+ *
+ * V8::Initialize() must have run prior to this.
*/
- static Isolate* New();
+ static Isolate* New(const CreateParams& params = CreateParams());
/**
* Returns the entered isolate for the current thread or NULL in
@@ -4488,6 +4811,54 @@ class V8_EXPORT Isolate {
*/
int ContextDisposedNotification();
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ *
+ * \param options options for the JIT code event handler.
+ * \param event_handler the JIT code event handler, which will be invoked
+ * each time code is added, moved or removed.
+ * \note \p event_handler won't get notified of existent code.
+ * \note since code removal notifications are not currently issued, the
+ * \p event_handler may get notifications of code that overlaps earlier
+ * code notifications. This happens when code areas are reused, and the
+ * earlier overlapping code areas should therefore be discarded.
+ * \note the events passed to \p event_handler and the strings they point to
+ * are not guaranteed to live past each call. The \p event_handler must
+ * copy strings and other parameters it needs to keep around.
+ * \note the set of events declared in JitCodeEvent::EventType is expected to
+ * grow over time, and the JitCodeEvent structure is expected to accrue
+ * new members. The \p event_handler function must ignore event codes
+ * it does not recognize to maintain future compatibility.
+ * \note Use Isolate::CreateParams to get events for code executed during
+ * Isolate setup.
+ */
+ void SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler);
+
+ /**
+ * Modifies the stack limit for this Isolate.
+ *
+ * \param stack_limit An address beyond which the Vm's stack may not grow.
+ *
+ * \note If you are using threads then you should hold the V8::Locker lock
+ * while setting the stack limit and you must set a non-default stack
+ * limit separately for each thread.
+ */
+ void SetStackLimit(uintptr_t stack_limit);
+
+ /**
+ * Returns a memory range that can potentially contain jitted code.
+ *
+ * On Win64, embedders are advised to install function table callbacks for
+ * these ranges, as default SEH won't be able to unwind through jitted code.
+ *
+ * Might be empty on other platforms.
+ *
+ * https://code.google.com/p/v8/issues/detail?id=3598
+ */
+ void GetCodeRange(void** start, size_t* length_in_bytes);
+
private:
template<class K, class V, class Traits> friend class PersistentValueMap;
@@ -4567,106 +4938,6 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
/**
- * FunctionEntryHook is the type of the profile entry hook called at entry to
- * any generated function when function-level profiling is enabled.
- *
- * \param function the address of the function that's being entered.
- * \param return_addr_location points to a location on stack where the machine
- * return address resides. This can be used to identify the caller of
- * \p function, and/or modified to divert execution when \p function exits.
- *
- * \note the entry hook must not cause garbage collection.
- */
-typedef void (*FunctionEntryHook)(uintptr_t function,
- uintptr_t return_addr_location);
-
-
-/**
- * A JIT code event is issued each time code is added, moved or removed.
- *
- * \note removal events are not currently issued.
- */
-struct JitCodeEvent {
- enum EventType {
- CODE_ADDED,
- CODE_MOVED,
- CODE_REMOVED,
- CODE_ADD_LINE_POS_INFO,
- CODE_START_LINE_INFO_RECORDING,
- CODE_END_LINE_INFO_RECORDING
- };
- // Definition of the code position type. The "POSITION" type means the place
- // in the source code which are of interest when making stack traces to
- // pin-point the source location of a stack frame as close as possible.
- // The "STATEMENT_POSITION" means the place at the beginning of each
- // statement, and is used to indicate possible break locations.
- enum PositionType {
- POSITION,
- STATEMENT_POSITION
- };
-
- // Type of event.
- EventType type;
- // Start of the instructions.
- void* code_start;
- // Size of the instructions.
- size_t code_len;
- // Script info for CODE_ADDED event.
- Handle<UnboundScript> script;
- // User-defined data for *_LINE_INFO_* event. It's used to hold the source
- // code line information which is returned from the
- // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
- // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
- void* user_data;
-
- struct name_t {
- // Name of the object associated with the code, note that the string is not
- // zero-terminated.
- const char* str;
- // Number of chars in str.
- size_t len;
- };
-
- struct line_info_t {
- // PC offset
- size_t offset;
- // Code postion
- size_t pos;
- // The position type.
- PositionType position_type;
- };
-
- union {
- // Only valid for CODE_ADDED.
- struct name_t name;
-
- // Only valid for CODE_ADD_LINE_POS_INFO
- struct line_info_t line_info;
-
- // New location of instructions. Only valid for CODE_MOVED.
- void* new_code_start;
- };
-};
-
-/**
- * Option flags passed to the SetJitCodeEventHandler function.
- */
-enum JitCodeEventOptions {
- kJitCodeEventDefault = 0,
- // Generate callbacks for already existent code.
- kJitCodeEventEnumExisting = 1
-};
-
-
-/**
- * Callback function passed to SetJitCodeEventHandler.
- *
- * \param event code add, move or removal event.
- */
-typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
-
-
-/**
* Interface for iterating through all external resources in the heap.
*/
class V8_EXPORT ExternalResourceVisitor { // NOLINT
@@ -4854,9 +5125,8 @@ class V8_EXPORT V8 {
static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
/**
- * Initializes from snapshot if possible. Otherwise, attempts to
- * initialize from scratch. This function is called implicitly if
- * you use the API without calling it first.
+ * Initializes V8. This function needs to be called before the first Isolate
+ * is created. It always returns true.
*/
static bool Initialize();
@@ -4874,45 +5144,6 @@ class V8_EXPORT V8 {
ReturnAddressLocationResolver return_address_resolver);
/**
- * Allows the host application to provide the address of a function that's
- * invoked on entry to every V8-generated function.
- * Note that \p entry_hook is invoked at the very start of each
- * generated function.
- *
- * \param isolate the isolate to operate on.
- * \param entry_hook a function that will be invoked on entry to every
- * V8-generated function.
- * \returns true on success on supported platforms, false on failure.
- * \note Setting an entry hook can only be done very early in an isolates
- * lifetime, and once set, the entry hook cannot be revoked.
- */
- static bool SetFunctionEntryHook(Isolate* isolate,
- FunctionEntryHook entry_hook);
-
- /**
- * Allows the host application to provide the address of a function that is
- * notified each time code is added, moved or removed.
- *
- * \param options options for the JIT code event handler.
- * \param event_handler the JIT code event handler, which will be invoked
- * each time code is added, moved or removed.
- * \note \p event_handler won't get notified of existent code.
- * \note since code removal notifications are not currently issued, the
- * \p event_handler may get notifications of code that overlaps earlier
- * code notifications. This happens when code areas are reused, and the
- * earlier overlapping code areas should therefore be discarded.
- * \note the events passed to \p event_handler and the strings they point to
- * are not guaranteed to live past each call. The \p event_handler must
- * copy strings and other parameters it needs to keep around.
- * \note the set of events declared in JitCodeEvent::EventType is expected to
- * grow over time, and the JitCodeEvent structure is expected to accrue
- * new members. The \p event_handler function must ignore event codes
- * it does not recognize to maintain future compatibility.
- */
- static void SetJitCodeEventHandler(JitCodeEventOptions options,
- JitCodeEventHandler event_handler);
-
- /**
* Forcefully terminate the current thread of JavaScript execution
* in the given isolate.
*
@@ -5517,15 +5748,16 @@ template <size_t ptr_size> struct SmiTagging;
template<int kSmiShiftSize>
V8_INLINE internal::Object* IntToSmi(int value) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
+ uintptr_t tagged_value =
+ (static_cast<uintptr_t>(value) << smi_shift_bits) | kSmiTag;
return reinterpret_cast<internal::Object*>(tagged_value);
}
// Smi constants for 32-bit systems.
template <> struct SmiTagging<4> {
- static const int kSmiShiftSize = 0;
- static const int kSmiValueSize = 31;
+ enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
+ static int SmiShiftSize() { return kSmiShiftSize; }
+ static int SmiValueSize() { return kSmiValueSize; }
V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
@@ -5552,8 +5784,9 @@ template <> struct SmiTagging<4> {
// Smi constants for 64-bit systems.
template <> struct SmiTagging<8> {
- static const int kSmiShiftSize = 31;
- static const int kSmiValueSize = 32;
+ enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
+ static int SmiShiftSize() { return kSmiShiftSize; }
+ static int SmiValueSize() { return kSmiValueSize; }
V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
@@ -5597,7 +5830,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
- static const int kExternalAsciiRepresentationTag = 0x06;
+ static const int kExternalOneByteRepresentationTag = 0x06;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kAmountOfExternalAllocatedMemoryOffset =
@@ -5686,7 +5919,7 @@ class Internals {
V8_INLINE static void UpdateNodeFlag(internal::Object** obj,
bool value, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- uint8_t mask = static_cast<uint8_t>(1 << shift);
+ uint8_t mask = static_cast<uint8_t>(1U << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
@@ -6268,7 +6501,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
ExternalStringResourceBase* resource = NULL;
- if (type == I::kExternalAsciiRepresentationTag ||
+ if (type == I::kExternalOneByteRepresentationTag ||
type == I::kExternalTwoByteRepresentationTag) {
void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
resource = static_cast<ExternalStringResourceBase*>(value);
@@ -6338,6 +6571,14 @@ template <class T> Value* Value::Cast(T* value) {
}
+Name* Name::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Name*>(value);
+}
+
+
Symbol* Symbol::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 452ffc73e7..87de994170 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -175,7 +175,12 @@
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
+// V8_HAS_BUILTIN_CLZ - __builtin_clz() supported
+// V8_HAS_BUILTIN_CTZ - __builtin_ctz() supported
// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
+// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
+// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
+// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
@@ -206,7 +211,12 @@
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
+# define V8_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
+# define V8_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
+# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
+# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
@@ -238,7 +248,10 @@
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
+# define V8_HAS_BUILTIN_CLZ (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_BUILTIN_CTZ (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
+# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
// g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
// without warnings (functionality used by the macros below). These modes
@@ -321,24 +334,6 @@ declarator __attribute__((deprecated))
#endif
-// A macro to mark variables or types as unused, avoiding compiler warnings.
-#if V8_HAS_ATTRIBUTE_UNUSED
-# define V8_UNUSED __attribute__((unused))
-#else
-# define V8_UNUSED
-#endif
-
-
-// Annotate a function indicating the caller must examine the return value.
-// Use like:
-// int foo() V8_WARN_UNUSED_RESULT;
-#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
-# define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-#else
-# define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
-#endif
-
-
// A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
@@ -369,33 +364,6 @@ declarator __attribute__((deprecated))
#endif
-// Annotate a virtual method indicating it must be overriding a virtual
-// method in the parent class.
-// Use like:
-// virtual void bar() V8_OVERRIDE;
-#if V8_HAS_CXX11_OVERRIDE
-# define V8_OVERRIDE override
-#else
-# define V8_OVERRIDE /* NOT SUPPORTED */
-#endif
-
-
-// Annotate a virtual method indicating that subclasses must not override it,
-// or annotate a class to indicate that it cannot be subclassed.
-// Use like:
-// class B V8_FINAL : public A {};
-// virtual void bar() V8_FINAL;
-#if V8_HAS_CXX11_FINAL
-# define V8_FINAL final
-#elif V8_HAS___FINAL
-# define V8_FINAL __final
-#elif V8_HAS_SEALED
-# define V8_FINAL sealed
-#else
-# define V8_FINAL /* NOT SUPPORTED */
-#endif
-
-
// This macro allows to specify memory alignment for structs, classes, etc.
// Use like:
// class V8_ALIGNED(16) MyClass { ... };
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 9b627f3019..69bfab49ba 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -257,6 +257,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
int result = RunMain(argc, argv);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 4db7eeb7b8..e5c9b7a53c 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -648,6 +648,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
map<string, string> options;
string file;
ParseOptions(argc, argv, &options, &file);
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index ef61426a0a..b66e8f7453 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -83,6 +83,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index f38a902bdf..260f5b2e5f 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+src/compiler/pipeline.h",
"-src/libplatform",
"-include/libplatform",
+ "+testing",
]
specific_include_rules = {
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 3875c4fdf4..011372cbff 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -23,9 +23,9 @@ namespace internal {
Handle<AccessorInfo> Accessors::MakeAccessor(
Isolate* isolate,
- Handle<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter,
+ Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
PropertyAttributes attributes) {
Factory* factory = isolate->factory();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
@@ -138,7 +138,7 @@ bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
- v8::Local<v8::String> name, Handle<Object> value) {
+ v8::Local<v8::Name> name, Handle<Object> value) {
Handle<Object> holder = Utils::OpenHandle(*info.Holder());
Handle<Object> receiver = Utils::OpenHandle(*info.This());
if (*holder == *receiver) return false;
@@ -156,6 +156,46 @@ bool SetPropertyOnInstanceIfInherited(
//
+// Accessors::ArgumentsIterator
+//
+
+
+void Accessors::ArgumentsIteratorGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* result = isolate->native_context()->array_values_iterator();
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
+}
+
+
+void Accessors::ArgumentsIteratorSetter(
+ v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSObject> object = Utils::OpenHandle(*info.This());
+ Handle<Object> value = Utils::OpenHandle(*val);
+
+ if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
+
+ LookupIterator it(object, Utils::OpenHandle(*name));
+ CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ DCHECK(it.HolderIsReceiverOrHiddenPrototype());
+ Object::SetDataProperty(&it, value);
+}
+
+
+Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<Name> name(isolate->native_context()->iterator_symbol(), isolate);
+ return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
+ &ArgumentsIteratorSetter, attributes);
+}
+
+
+//
// Accessors::ArrayLength
//
@@ -176,7 +216,7 @@ Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
void Accessors::ArrayLengthGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -188,7 +228,7 @@ void Accessors::ArrayLengthGetter(
void Accessors::ArrayLengthSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
@@ -222,9 +262,15 @@ void Accessors::ArrayLengthSetter(
return;
}
- isolate->ScheduleThrow(
- *isolate->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ Handle<Object> exception;
+ maybe = isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0));
+ if (!maybe.ToHandle(&exception)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+
+ isolate->ScheduleThrow(*exception);
}
@@ -244,7 +290,7 @@ Handle<AccessorInfo> Accessors::ArrayLengthInfo(
//
void Accessors::StringLengthGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -267,7 +313,7 @@ void Accessors::StringLengthGetter(
void Accessors::StringLengthSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -290,7 +336,7 @@ Handle<AccessorInfo> Accessors::StringLengthInfo(
void Accessors::ScriptColumnOffsetGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -302,7 +348,7 @@ void Accessors::ScriptColumnOffsetGetter(
void Accessors::ScriptColumnOffsetSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -312,7 +358,7 @@ void Accessors::ScriptColumnOffsetSetter(
Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("column_offset")));
+ STATIC_CHAR_VECTOR("column_offset")));
return MakeAccessor(isolate,
name,
&ScriptColumnOffsetGetter,
@@ -327,7 +373,7 @@ Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
void Accessors::ScriptIdGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -339,7 +385,7 @@ void Accessors::ScriptIdGetter(
void Accessors::ScriptIdSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -348,8 +394,8 @@ void Accessors::ScriptIdSetter(
Handle<AccessorInfo> Accessors::ScriptIdInfo(
Isolate* isolate, PropertyAttributes attributes) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("id")));
+ Handle<String> name(
+ isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
return MakeAccessor(isolate,
name,
&ScriptIdGetter,
@@ -364,7 +410,7 @@ Handle<AccessorInfo> Accessors::ScriptIdInfo(
void Accessors::ScriptNameGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -376,7 +422,7 @@ void Accessors::ScriptNameGetter(
void Accessors::ScriptNameSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -399,7 +445,7 @@ Handle<AccessorInfo> Accessors::ScriptNameInfo(
void Accessors::ScriptSourceGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -411,7 +457,7 @@ void Accessors::ScriptSourceGetter(
void Accessors::ScriptSourceSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -434,7 +480,7 @@ Handle<AccessorInfo> Accessors::ScriptSourceInfo(
void Accessors::ScriptLineOffsetGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -446,7 +492,7 @@ void Accessors::ScriptLineOffsetGetter(
void Accessors::ScriptLineOffsetSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -456,7 +502,7 @@ void Accessors::ScriptLineOffsetSetter(
Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("line_offset")));
+ STATIC_CHAR_VECTOR("line_offset")));
return MakeAccessor(isolate,
name,
&ScriptLineOffsetGetter,
@@ -471,7 +517,7 @@ Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
void Accessors::ScriptTypeGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -483,7 +529,7 @@ void Accessors::ScriptTypeGetter(
void Accessors::ScriptTypeSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -492,8 +538,8 @@ void Accessors::ScriptTypeSetter(
Handle<AccessorInfo> Accessors::ScriptTypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("type")));
+ Handle<String> name(
+ isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
return MakeAccessor(isolate,
name,
&ScriptTypeGetter,
@@ -508,7 +554,7 @@ Handle<AccessorInfo> Accessors::ScriptTypeInfo(
void Accessors::ScriptCompilationTypeGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -521,7 +567,7 @@ void Accessors::ScriptCompilationTypeGetter(
void Accessors::ScriptCompilationTypeSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -531,7 +577,7 @@ void Accessors::ScriptCompilationTypeSetter(
Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("compilation_type")));
+ STATIC_CHAR_VECTOR("compilation_type")));
return MakeAccessor(isolate,
name,
&ScriptCompilationTypeGetter,
@@ -546,7 +592,7 @@ Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
void Accessors::ScriptLineEndsGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -566,7 +612,7 @@ void Accessors::ScriptLineEndsGetter(
void Accessors::ScriptLineEndsSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -576,7 +622,7 @@ void Accessors::ScriptLineEndsSetter(
Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("line_ends")));
+ STATIC_CHAR_VECTOR("line_ends")));
return MakeAccessor(isolate,
name,
&ScriptLineEndsGetter,
@@ -591,7 +637,7 @@ Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
void Accessors::ScriptSourceUrlGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -603,7 +649,7 @@ void Accessors::ScriptSourceUrlGetter(
void Accessors::ScriptSourceUrlSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -626,7 +672,7 @@ Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
void Accessors::ScriptSourceMappingUrlGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -639,7 +685,7 @@ void Accessors::ScriptSourceMappingUrlGetter(
void Accessors::ScriptSourceMappingUrlSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -662,7 +708,7 @@ Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
void Accessors::ScriptContextDataGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
@@ -674,7 +720,7 @@ void Accessors::ScriptContextDataGetter(
void Accessors::ScriptContextDataSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -684,7 +730,7 @@ void Accessors::ScriptContextDataSetter(
Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("context_data")));
+ STATIC_CHAR_VECTOR("context_data")));
return MakeAccessor(isolate,
name,
&ScriptContextDataGetter,
@@ -699,7 +745,7 @@ Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
void Accessors::ScriptEvalFromScriptGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -721,7 +767,7 @@ void Accessors::ScriptEvalFromScriptGetter(
void Accessors::ScriptEvalFromScriptSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -731,7 +777,7 @@ void Accessors::ScriptEvalFromScriptSetter(
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_script")));
+ STATIC_CHAR_VECTOR("eval_from_script")));
return MakeAccessor(isolate,
name,
&ScriptEvalFromScriptGetter,
@@ -746,7 +792,7 @@ Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
void Accessors::ScriptEvalFromScriptPositionGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -767,7 +813,7 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
void Accessors::ScriptEvalFromScriptPositionSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -777,7 +823,7 @@ void Accessors::ScriptEvalFromScriptPositionSetter(
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_script_position")));
+ STATIC_CHAR_VECTOR("eval_from_script_position")));
return MakeAccessor(isolate,
name,
&ScriptEvalFromScriptPositionGetter,
@@ -792,7 +838,7 @@ Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
void Accessors::ScriptEvalFromFunctionNameGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -813,7 +859,7 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
void Accessors::ScriptEvalFromFunctionNameSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -823,7 +869,7 @@ void Accessors::ScriptEvalFromFunctionNameSetter(
Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("eval_from_function_name")));
+ STATIC_CHAR_VECTOR("eval_from_function_name")));
return MakeAccessor(isolate,
name,
&ScriptEvalFromFunctionNameGetter,
@@ -884,7 +930,7 @@ Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
void Accessors::FunctionPrototypeGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -896,7 +942,7 @@ void Accessors::FunctionPrototypeGetter(
void Accessors::FunctionPrototypeSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
@@ -927,7 +973,7 @@ Handle<AccessorInfo> Accessors::FunctionPrototypeInfo(
void Accessors::FunctionLengthGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -953,7 +999,7 @@ void Accessors::FunctionLengthGetter(
void Accessors::FunctionLengthSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Function length is non writable, non configurable.
@@ -977,7 +1023,7 @@ Handle<AccessorInfo> Accessors::FunctionLengthInfo(
void Accessors::FunctionNameGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -989,7 +1035,7 @@ void Accessors::FunctionNameGetter(
void Accessors::FunctionNameSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Function name is non writable, non configurable.
@@ -1114,7 +1160,7 @@ Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
void Accessors::FunctionArgumentsGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -1126,7 +1172,7 @@ void Accessors::FunctionArgumentsGetter(
void Accessors::FunctionArgumentsSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Function arguments is non writable, non configurable.
@@ -1257,7 +1303,7 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
void Accessors::FunctionCallerGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -1277,7 +1323,7 @@ void Accessors::FunctionCallerGetter(
void Accessors::FunctionCallerSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Function caller is non writable, non configurable.
@@ -1310,9 +1356,16 @@ static void ModuleGetExport(
Isolate* isolate = instance->GetIsolate();
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
- isolate->ScheduleThrow(
- *isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1)));
+
+ Handle<Object> exception;
+ MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
+ if (!maybe.ToHandle(&exception)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+
+ isolate->ScheduleThrow(*exception);
return;
}
info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
@@ -1328,12 +1381,18 @@ static void ModuleSetExport(
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* old_value = context->get(slot);
+ Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
- Isolate* isolate = instance->GetIsolate();
- isolate->ScheduleThrow(
- *isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1)));
+ Handle<Object> exception;
+ MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
+ if (!maybe.ToHandle(&exception)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+
+ isolate->ScheduleThrow(*exception);
return;
}
context->set(slot, *v8::Utils::OpenHandle(*value));
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 17b7510ade..8fc1f84be3 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -13,27 +13,28 @@ namespace internal {
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_INFO_LIST(V) \
- V(ArrayLength) \
- V(FunctionArguments) \
- V(FunctionCaller) \
- V(FunctionName) \
- V(FunctionLength) \
- V(FunctionPrototype) \
- V(ScriptColumnOffset) \
- V(ScriptCompilationType) \
- V(ScriptContextData) \
- V(ScriptEvalFromScript) \
- V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName) \
- V(ScriptId) \
- V(ScriptLineEnds) \
- V(ScriptLineOffset) \
- V(ScriptName) \
- V(ScriptSource) \
- V(ScriptType) \
- V(ScriptSourceUrl) \
- V(ScriptSourceMappingUrl) \
+#define ACCESSOR_INFO_LIST(V) \
+ V(ArgumentsIterator) \
+ V(ArrayLength) \
+ V(FunctionArguments) \
+ V(FunctionCaller) \
+ V(FunctionName) \
+ V(FunctionLength) \
+ V(FunctionPrototype) \
+ V(ScriptColumnOffset) \
+ V(ScriptCompilationType) \
+ V(ScriptContextData) \
+ V(ScriptEvalFromScript) \
+ V(ScriptEvalFromScriptPosition) \
+ V(ScriptEvalFromFunctionName) \
+ V(ScriptId) \
+ V(ScriptLineEnds) \
+ V(ScriptLineOffset) \
+ V(ScriptName) \
+ V(ScriptSource) \
+ V(ScriptType) \
+ V(ScriptSourceUrl) \
+ V(ScriptSourceMappingUrl) \
V(StringLength)
// Accessors contains all predefined proxy accessors.
@@ -43,10 +44,10 @@ class Accessors : public AllStatic {
// Accessor descriptors.
#define ACCESSOR_INFO_DECLARATION(name) \
static void name##Getter( \
- v8::Local<v8::String> name, \
+ v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info); \
static void name##Setter( \
- v8::Local<v8::String> name, \
+ v8::Local<v8::Name> name, \
v8::Local<v8::Value> value, \
const v8::PropertyCallbackInfo<void>& info); \
static Handle<AccessorInfo> name##Info( \
@@ -83,9 +84,9 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate,
- Handle<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter,
+ Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
PropertyAttributes attributes);
static Handle<ExecutableAccessorInfo> CloneAccessor(
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index b5aa98416b..cae1c10251 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -5,6 +5,7 @@
#include "src/allocation.h"
#include <stdlib.h> // For free, malloc.
+#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
@@ -83,7 +84,8 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) {
- DCHECK(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT
+ DCHECK_LE(V8_ALIGNOF(void*), alignment);
+ DCHECK(base::bits::IsPowerOfTwo32(alignment));
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 4a6345910f..0fbdf7bd9b 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -13,6 +13,7 @@
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "src/assert-scope.h"
+#include "src/background-parsing-task.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
@@ -37,7 +38,7 @@
#include "src/property.h"
#include "src/property-details.h"
#include "src/prototype.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
#include "src/scanner-character-streams.h"
#include "src/simulator.h"
@@ -201,29 +202,6 @@ static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
}
-// --- S t a t i c s ---
-
-
-static bool InitializeHelper(i::Isolate* isolate) {
- // If the isolate has a function entry hook, it needs to re-build all its
- // code stubs with entry hooks embedded, so let's deserialize a snapshot.
- if (isolate == NULL || isolate->function_entry_hook() == NULL) {
- if (i::Snapshot::Initialize())
- return true;
- }
- return i::V8::Initialize(NULL);
-}
-
-
-static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
- const char* location) {
- return (isolate != NULL && isolate->IsInitialized()) ||
- Utils::ApiCheck(InitializeHelper(isolate),
- location,
- "Error initializing V8");
-}
-
-
StartupDataDecompressor::StartupDataDecompressor()
: raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
@@ -413,6 +391,7 @@ void RegisteredExtension::UnregisterAll() {
delete re;
re = next;
}
+ first_extension_ = NULL;
}
@@ -492,30 +471,23 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
}
-bool SetResourceConstraints(Isolate* v8_isolate,
- ResourceConstraints* constraints) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- int semi_space_size = constraints->max_semi_space_size();
- int old_space_size = constraints->max_old_space_size();
- int max_executable_size = constraints->max_executable_size();
- size_t code_range_size = constraints->code_range_size();
+void SetResourceConstraints(i::Isolate* isolate,
+ const ResourceConstraints& constraints) {
+ int semi_space_size = constraints.max_semi_space_size();
+ int old_space_size = constraints.max_old_space_size();
+ int max_executable_size = constraints.max_executable_size();
+ size_t code_range_size = constraints.code_range_size();
if (semi_space_size != 0 || old_space_size != 0 ||
max_executable_size != 0 || code_range_size != 0) {
- // After initialization it's too late to change Heap constraints.
- DCHECK(!isolate->IsInitialized());
- bool result = isolate->heap()->ConfigureHeap(semi_space_size,
- old_space_size,
- max_executable_size,
- code_range_size);
- if (!result) return false;
- }
- if (constraints->stack_limit() != NULL) {
- uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
+ isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
+ max_executable_size, code_range_size);
+ }
+ if (constraints.stack_limit() != NULL) {
+ uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
- isolate->set_max_available_threads(constraints->max_available_threads());
- return true;
+ isolate->set_max_available_threads(constraints.max_available_threads());
}
@@ -744,7 +716,6 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
// NeanderObject constructor. When you add one to the site calling the
// constructor you should check that you ensured the VM was not dead first.
NeanderObject::NeanderObject(v8::internal::Isolate* isolate, int size) {
- EnsureInitializedForIsolate(isolate, "v8::Nowhere");
ENTER_V8(isolate);
value_ = isolate->factory()->NewNeanderObject();
i::Handle<i::FixedArray> elements = isolate->factory()->NewFixedArray(size);
@@ -828,7 +799,7 @@ static void TemplateSet(i::Isolate* isolate,
}
-void Template::Set(v8::Handle<String> name,
+void Template::Set(v8::Handle<Name> name,
v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
i::Isolate* isolate = i::Isolate::Current();
@@ -845,7 +816,7 @@ void Template::Set(v8::Handle<String> name,
void Template::SetAccessorProperty(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<FunctionTemplate> getter,
v8::Local<FunctionTemplate> setter,
v8::PropertyAttribute attribute,
@@ -937,7 +908,6 @@ Local<FunctionTemplate> FunctionTemplate::New(
v8::Handle<Signature> signature,
int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::FunctionTemplate::New()");
LOG_API(i_isolate, "FunctionTemplate::New");
ENTER_V8(i_isolate);
return FunctionTemplateNew(
@@ -949,7 +919,6 @@ Local<Signature> Signature::New(Isolate* isolate,
Handle<FunctionTemplate> receiver, int argc,
Handle<FunctionTemplate> argv[]) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Signature::New()");
LOG_API(i_isolate, "Signature::New");
ENTER_V8(i_isolate);
i::Handle<i::Struct> struct_obj =
@@ -1100,7 +1069,6 @@ Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::TypeSwitch::New()");
LOG_API(isolate, "TypeSwitch::New");
ENTER_V8(isolate);
i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
@@ -1156,7 +1124,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
i::Handle<i::AccessorInfo> obj,
- v8::Handle<String> name,
+ v8::Handle<Name> name,
v8::AccessControl settings,
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
@@ -1173,7 +1141,7 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
template<typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
+ v8::Handle<Name> name,
Getter getter,
Setter setter,
v8::Handle<Value> data,
@@ -1194,7 +1162,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
+ v8::Handle<Name> name,
v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
void* setter_ignored,
void* data_ignored,
@@ -1281,7 +1249,6 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
i::Isolate* isolate,
v8::Handle<FunctionTemplate> constructor) {
- EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
i::Handle<i::Struct> struct_obj =
@@ -1345,10 +1312,10 @@ static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
}
-template<typename Setter, typename Getter, typename Data, typename Template>
+template<typename Getter, typename Setter, typename Data, typename Template>
static bool TemplateSetAccessor(
Template* template_obj,
- v8::Local<String> name,
+ v8::Local<Name> name,
Getter getter,
Setter setter,
Data data,
@@ -1368,7 +1335,7 @@ static bool TemplateSetAccessor(
bool Template::SetDeclaredAccessor(
- Local<String> name,
+ Local<Name> name,
Local<DeclaredAccessorDescriptor> descriptor,
PropertyAttribute attribute,
Local<AccessorSignature> signature,
@@ -1391,6 +1358,18 @@ void Template::SetNativeDataProperty(v8::Local<String> name,
}
+void Template::SetNativeDataProperty(v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ v8::Handle<Value> data,
+ PropertyAttribute attribute,
+ v8::Local<AccessorSignature> signature,
+ AccessControl settings) {
+ TemplateSetAccessor(
+ this, name, getter, setter, data, settings, attribute, signature);
+}
+
+
void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
@@ -1403,6 +1382,18 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
}
+void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ v8::Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attribute,
+ v8::Handle<AccessorSignature> signature) {
+ TemplateSetAccessor(
+ this, name, getter, setter, data, settings, attribute, signature);
+}
+
+
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter,
NamedPropertySetterCallback setter,
@@ -1575,6 +1566,20 @@ ScriptCompiler::CachedData::~CachedData() {
}
+ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
+ Encoding encoding)
+ : impl_(new i::StreamedSource(stream, encoding)) {}
+
+
+ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
+
+
+const ScriptCompiler::CachedData*
+ScriptCompiler::StreamedSource::GetCachedData() const {
+ return impl_->cached_data.get();
+}
+
+
Local<Script> UnboundScript::BindToCurrentContext() {
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
@@ -1789,6 +1794,89 @@ Local<Script> ScriptCompiler::Compile(
}
+ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
+ Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ if (!isolate->global_context().is_null() &&
+ !isolate->global_context()->IsNativeContext()) {
+ // The context chain is non-trivial, and constructing the corresponding
+ // non-trivial Scope chain outside the V8 heap is not implemented. Don't
+ // stream the script. This will only occur if Harmony scoping is enabled and
+ // a previous script has introduced "let" or "const" variables. TODO(marja):
+ // Implement externalizing ScopeInfos and constructing non-trivial Scope
+ // chains independent of the V8 heap so that we can stream also in this
+ // case.
+ return NULL;
+ }
+ return new i::BackgroundParsingTask(source->impl(), options,
+ i::FLAG_stack_size, isolate);
+}
+
+
+Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
+ StreamedSource* v8_source,
+ Handle<String> full_source_string,
+ const ScriptOrigin& origin) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::StreamedSource* source = v8_source->impl();
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>());
+ LOG_API(isolate, "ScriptCompiler::Compile()");
+ ENTER_V8(isolate);
+ i::SharedFunctionInfo* raw_result = NULL;
+
+ {
+ i::HandleScope scope(isolate);
+ i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
+ i::Handle<i::Script> script = isolate->factory()->NewScript(str);
+ if (!origin.ResourceName().IsEmpty()) {
+ script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
+ }
+ if (!origin.ResourceLineOffset().IsEmpty()) {
+ script->set_line_offset(i::Smi::FromInt(
+ static_cast<int>(origin.ResourceLineOffset()->Value())));
+ }
+ if (!origin.ResourceColumnOffset().IsEmpty()) {
+ script->set_column_offset(i::Smi::FromInt(
+ static_cast<int>(origin.ResourceColumnOffset()->Value())));
+ }
+ if (!origin.ResourceIsSharedCrossOrigin().IsEmpty()) {
+ script->set_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin() ==
+ v8::True(v8_isolate));
+ }
+ source->info->set_script(script);
+ source->info->SetContext(isolate->global_context());
+
+ EXCEPTION_PREAMBLE(isolate);
+
+ // Do the parsing tasks which need to be done on the main thread. This will
+ // also handle parse errors.
+ source->parser->Internalize();
+
+ i::Handle<i::SharedFunctionInfo> result =
+ i::Handle<i::SharedFunctionInfo>::null();
+ if (source->info->function() != NULL) {
+ // Parsing has succeeded.
+ result =
+ i::Compiler::CompileStreamedScript(source->info.get(), str->length());
+ }
+ has_pending_exception = result.is_null();
+ if (has_pending_exception) isolate->ReportPendingMessages();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+
+ raw_result = *result;
+ // The Handle<Script> will go out of scope soon; make sure CompilationInfo
+ // doesn't point to it.
+ source->info->set_script(i::Handle<i::Script>());
+ } // HandleScope goes out of scope.
+ i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
+ Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
+ if (generic.IsEmpty()) {
+ return Local<Script>();
+ }
+ return generic->BindToCurrentContext();
+}
+
+
Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
@@ -1827,7 +1915,7 @@ v8::TryCatch::TryCatch()
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- GetCurrentStackPosition()));
+ v8::internal::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2041,7 +2129,7 @@ MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
i::Handle<i::Object> argv[] = { data };
return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(),
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv);
}
@@ -2277,7 +2365,6 @@ bool StackFrame::IsConstructor() const {
Local<Value> JSON::Parse(Local<String> json_string) {
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
i::Isolate* isolate = string->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::JSON::Parse");
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::String> source = i::String::Flatten(string);
@@ -2324,6 +2411,11 @@ bool Value::IsFunction() const {
}
+bool Value::IsName() const {
+ return Utils::OpenHandle(this)->IsName();
+}
+
+
bool Value::FullIsString() const {
bool result = Utils::OpenHandle(this)->IsString();
DCHECK_EQ(result, QuickIsString());
@@ -2383,6 +2475,28 @@ bool Value::IsNumber() const {
}
+#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
+ bool Value::Is##Type() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ if (!obj->IsHeapObject()) return false; \
+ i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); \
+ return obj->HasSpecificClassOf(isolate->heap()->Class##_string()); \
+ }
+
+VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, Arguments)
+VALUE_IS_SPECIFIC_TYPE(BooleanObject, Boolean)
+VALUE_IS_SPECIFIC_TYPE(NumberObject, Number)
+VALUE_IS_SPECIFIC_TYPE(StringObject, String)
+VALUE_IS_SPECIFIC_TYPE(SymbolObject, Symbol)
+VALUE_IS_SPECIFIC_TYPE(Date, Date)
+VALUE_IS_SPECIFIC_TYPE(Map, Map)
+VALUE_IS_SPECIFIC_TYPE(Set, Set)
+VALUE_IS_SPECIFIC_TYPE(WeakMap, WeakMap)
+VALUE_IS_SPECIFIC_TYPE(WeakSet, WeakSet)
+
+#undef VALUE_IS_SPECIFIC_TYPE
+
+
bool Value::IsBoolean() const {
return Utils::OpenHandle(this)->IsBoolean();
}
@@ -2417,38 +2531,6 @@ bool Value::IsUint32() const {
}
-bool Value::IsDate() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsHeapObject()) return false;
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return obj->HasSpecificClassOf(isolate->heap()->Date_string());
-}
-
-
-bool Value::IsStringObject() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsHeapObject()) return false;
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return obj->HasSpecificClassOf(isolate->heap()->String_string());
-}
-
-
-bool Value::IsSymbolObject() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsHeapObject()) return false;
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
-}
-
-
-bool Value::IsNumberObject() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsHeapObject()) return false;
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return obj->HasSpecificClassOf(isolate->heap()->Number_string());
-}
-
-
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
@@ -2480,17 +2562,22 @@ bool Value::IsNativeError() const {
}
-bool Value::IsBooleanObject() const {
+bool Value::IsRegExp() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsHeapObject()) return false;
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
+ return obj->IsJSRegExp();
}
-bool Value::IsRegExp() const {
+bool Value::IsGeneratorFunction() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSRegExp();
+ if (!obj->IsJSFunction()) return false;
+ i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
+ return func->shared()->is_generator();
+}
+
+
+bool Value::IsGeneratorObject() const {
+ return Utils::OpenHandle(this)->IsJSGeneratorObject();
}
@@ -2632,6 +2719,14 @@ void v8::Function::CheckCast(Value* that) {
}
+void v8::Name::CheckCast(v8::Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsName(),
+ "v8::Name::Cast()",
+ "Could not convert to name");
+}
+
+
void v8::String::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsString(),
@@ -2948,7 +3043,7 @@ bool Value::Equals(Handle<Value> that) const {
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result;
has_pending_exception = !CallV8HeapFunction(
- "EQUALS", obj, ARRAY_SIZE(args), args).ToHandle(&result);
+ "EQUALS", obj, arraysize(args), args).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
@@ -3177,7 +3272,7 @@ Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<String> key) {
has_pending_exception = !CallV8HeapFunction(
"ObjectGetOwnPropertyDescriptor",
isolate->factory()->undefined_value(),
- ARRAY_SIZE(args),
+ arraysize(args),
args).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -3296,7 +3391,8 @@ Local<String> v8::Object::ObjectProtoToString() {
return v8::String::NewFromUtf8(isolate, "[object ]");
} else {
i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
+ if (i::String::Equals(class_name,
+ i_isolate->factory()->Arguments_string())) {
return v8::String::NewFromUtf8(isolate, "[object Object]");
} else {
const char* prefix = "[object ";
@@ -3414,11 +3510,11 @@ bool v8::Object::Has(uint32_t index) {
}
-template<typename Setter, typename Getter, typename Data>
+template<typename Getter, typename Setter, typename Data>
static inline bool ObjectSetAccessor(Object* obj,
- Handle<String> name,
- Setter getter,
- Getter setter,
+ Handle<Name> name,
+ Getter getter,
+ Setter setter,
Data data,
AccessControl settings,
PropertyAttribute attributes) {
@@ -3453,7 +3549,18 @@ bool Object::SetAccessor(Handle<String> name,
}
-bool Object::SetDeclaredAccessor(Local<String> name,
+bool Object::SetAccessor(Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ v8::Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attributes) {
+ return ObjectSetAccessor(
+ this, name, getter, setter, data, settings, attributes);
+}
+
+
+bool Object::SetDeclaredAccessor(Local<Name> name,
Local<DeclaredAccessorDescriptor> descriptor,
PropertyAttribute attributes,
AccessControl settings) {
@@ -3463,7 +3570,7 @@ bool Object::SetDeclaredAccessor(Local<String> name,
}
-void Object::SetAccessorProperty(Local<String> name,
+void Object::SetAccessorProperty(Local<Name> name,
Local<Function> getter,
Handle<Function> setter,
PropertyAttribute attribute,
@@ -3555,26 +3662,15 @@ bool v8::Object::HasIndexedLookupInterceptor() {
}
-static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
- i::Handle<i::JSObject> receiver,
- i::Handle<i::String> name,
- i::LookupResult* lookup) {
- if (!lookup->IsProperty()) {
- // No real property was found.
- return Local<Value>();
- }
-
- // If the property being looked up is a callback, it can throw
- // an exception.
- EXCEPTION_PREAMBLE(isolate);
- i::LookupIterator it(
- receiver, name, i::Handle<i::JSReceiver>(lookup->holder(), isolate),
- i::LookupIterator::SKIP_INTERCEPTOR);
+static Local<Value> GetPropertyByLookup(i::LookupIterator* it) {
+ // If the property being looked up is a callback, it can throw an exception.
+ EXCEPTION_PREAMBLE(it->isolate());
i::Handle<i::Object> result;
- has_pending_exception = !i::Object::GetProperty(&it).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+ has_pending_exception = !i::Object::GetProperty(it).ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK(it->isolate(), Local<Value>());
- return Utils::ToLocal(result);
+ if (it->IsFound()) return Utils::ToLocal(result);
+ return Local<Value>();
}
@@ -3587,9 +3683,12 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup(isolate);
- self_obj->LookupRealNamedPropertyInPrototypes(key_obj, &lookup);
- return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
+ i::PrototypeIterator iter(isolate, self_obj);
+ if (iter.IsAtEnd()) return Local<Value>();
+ i::Handle<i::Object> proto = i::PrototypeIterator::GetCurrent(iter);
+ i::LookupIterator it(self_obj, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ return GetPropertyByLookup(&it);
}
@@ -3600,9 +3699,9 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup(isolate);
- self_obj->LookupRealNamedProperty(key_obj, &lookup);
- return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
+ i::LookupIterator it(self_obj, key_obj,
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ return GetPropertyByLookup(&it);
}
@@ -4055,16 +4154,15 @@ Handle<Value> Function::GetDisplayName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
i::Handle<i::String> property_name =
isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("displayName"));
- i::LookupResult lookup(isolate);
- func->LookupRealNamedProperty(property_name, &lookup);
- if (lookup.IsFound()) {
- i::Object* value = lookup.GetLazyValue();
- if (value && value->IsString()) {
- i::String* name = i::String::cast(value);
- if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
- }
+ STATIC_CHAR_VECTOR("displayName"));
+
+ i::Handle<i::Object> value =
+ i::JSObject::GetDataProperty(func, property_name);
+ if (value->IsString()) {
+ i::Handle<i::String> name = i::Handle<i::String>::cast(value);
+ if (name->length() > 0) return Utils::ToLocal(name);
}
+
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -4294,7 +4392,7 @@ class Utf8LengthHelper : public i::AllStatic {
void VisitOneByteString(const uint8_t* chars, int length) {
int utf8_length = 0;
- // Add in length 1 for each non-ASCII character.
+ // Add in length 1 for each non-Latin1 character.
for (int i = 0; i < length; i++) {
utf8_length += *chars++ >> 7;
}
@@ -4696,7 +4794,7 @@ int String::WriteUtf8(char* buffer,
// First check that the buffer is large enough.
int utf8_bytes = v8::Utf8Length(*str, str->GetIsolate());
if (utf8_bytes <= capacity) {
- // ASCII fast path.
+ // one-byte fast path.
if (utf8_bytes == string_length) {
WriteOneByte(reinterpret_cast<uint8_t*>(buffer), 0, capacity, options);
if (nchars_ref != NULL) *nchars_ref = string_length;
@@ -4770,14 +4868,13 @@ int String::Write(uint16_t* buffer,
bool v8::String::IsExternal() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
return i::StringShape(*str).IsExternalTwoByte();
}
-bool v8::String::IsExternalAscii() const {
+bool v8::String::IsExternalOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- return i::StringShape(*str).IsExternalAscii();
+ return i::StringShape(*str).IsExternalOneByte();
}
@@ -4800,11 +4897,11 @@ void v8::String::VerifyExternalStringResourceBase(
i::Handle<i::String> str = Utils::OpenHandle(this);
const v8::String::ExternalStringResourceBase* expected;
Encoding expectedEncoding;
- if (i::StringShape(*str).IsExternalAscii()) {
+ if (i::StringShape(*str).IsExternalOneByte()) {
const void* resource =
- i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+ i::Handle<i::ExternalOneByteString>::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
- expectedEncoding = ASCII_ENCODING;
+ expectedEncoding = ONE_BYTE_ENCODING;
} else if (i::StringShape(*str).IsExternalTwoByte()) {
const void* resource =
i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
@@ -4812,20 +4909,20 @@ void v8::String::VerifyExternalStringResourceBase(
expectedEncoding = TWO_BYTE_ENCODING;
} else {
expected = NULL;
- expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
- : TWO_BYTE_ENCODING;
+ expectedEncoding =
+ str->IsOneByteRepresentation() ? ONE_BYTE_ENCODING : TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
CHECK_EQ(expectedEncoding, encoding);
}
-const v8::String::ExternalAsciiStringResource*
-v8::String::GetExternalAsciiStringResource() const {
+const v8::String::ExternalOneByteStringResource*
+v8::String::GetExternalOneByteStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (i::StringShape(*str).IsExternalAscii()) {
+ if (i::StringShape(*str).IsExternalOneByte()) {
const void* resource =
- i::Handle<i::ExternalAsciiString>::cast(str)->resource();
- return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
+ i::Handle<i::ExternalOneByteString>::cast(str)->resource();
+ return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
} else {
return NULL;
}
@@ -4959,11 +5056,8 @@ void v8::V8::ShutdownPlatform() {
bool v8::V8::Initialize() {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate != NULL && isolate->IsInitialized()) {
- return true;
- }
- return InitializeHelper(isolate);
+ i::V8::Initialize();
+ return true;
}
@@ -4977,38 +5071,6 @@ void v8::V8::SetReturnAddressLocationResolver(
i::V8::SetReturnAddressLocationResolver(return_address_resolver);
}
-
-bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
- FunctionEntryHook entry_hook) {
- DCHECK(ext_isolate != NULL);
- DCHECK(entry_hook != NULL);
-
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate);
-
- // The entry hook can only be set before the Isolate is initialized, as
- // otherwise the Isolate's code stubs generated at initialization won't
- // contain entry hooks.
- if (isolate->IsInitialized())
- return false;
-
- // Setting an entry hook is a one-way operation, once set, it cannot be
- // changed or unset.
- if (isolate->function_entry_hook() != NULL)
- return false;
-
- isolate->set_function_entry_hook(entry_hook);
- return true;
-}
-
-
-void v8::V8::SetJitCodeEventHandler(
- JitCodeEventOptions options, JitCodeEventHandler event_handler) {
- i::Isolate* isolate = i::Isolate::Current();
- // Ensure that logging is initialized for our isolate.
- isolate->InitializeLoggingAndCounters();
- isolate->logger()->SetCodeEventHandler(options, event_handler);
-}
-
void v8::V8::SetArrayBufferAllocator(
ArrayBuffer::Allocator* allocator) {
if (!Utils::ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
@@ -5158,7 +5220,6 @@ Local<Context> v8::Context::New(
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
i::HandleScope scope(isolate);
@@ -5289,7 +5350,6 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::External::New()");
LOG_API(i_isolate, "External::New");
ENTER_V8(i_isolate);
i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
@@ -5364,7 +5424,6 @@ inline Local<String> NewString(Isolate* v8_isolate,
String::NewStringType type,
int length) {
i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
- EnsureInitializedForIsolate(isolate, location);
LOG_API(isolate, env);
if (length == 0 && type != String::kUndetectableString) {
return String::Empty(v8_isolate);
@@ -5427,7 +5486,6 @@ Local<String> String::NewFromTwoByte(Isolate* isolate,
Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
i::Isolate* isolate = left_string->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::String::New()");
LOG_API(isolate, "String::New(char)");
ENTER_V8(isolate);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
@@ -5438,21 +5496,15 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
}
-static i::Handle<i::String> NewExternalStringHandle(
- i::Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
- // We do not expect this to fail. Change this if it does.
- return isolate->factory()->NewExternalStringFromTwoByte(
- resource).ToHandleChecked();
+static i::MaybeHandle<i::String> NewExternalStringHandle(
+ i::Isolate* isolate, v8::String::ExternalStringResource* resource) {
+ return isolate->factory()->NewExternalStringFromTwoByte(resource);
}
-static i::Handle<i::String> NewExternalAsciiStringHandle(
- i::Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
- // We do not expect this to fail. Change this if it does.
- return isolate->factory()->NewExternalStringFromAscii(
- resource).ToHandleChecked();
+static i::MaybeHandle<i::String> NewExternalOneByteStringHandle(
+ i::Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
+ return isolate->factory()->NewExternalStringFromOneByte(resource);
}
@@ -5460,13 +5512,16 @@ Local<String> v8::String::NewExternal(
Isolate* isolate,
v8::String::ExternalStringResource* resource) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
LOG_API(i_isolate, "String::NewExternal");
ENTER_V8(i_isolate);
CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalStringHandle(i_isolate, resource);
- i_isolate->heap()->external_string_table()->AddString(*result);
- return Utils::ToLocal(result);
+ EXCEPTION_PREAMBLE(i_isolate);
+ i::Handle<i::String> string;
+ has_pending_exception =
+ !NewExternalStringHandle(i_isolate, resource).ToHandle(&string);
+ EXCEPTION_BAILOUT_CHECK(i_isolate, Local<String>());
+ i_isolate->heap()->external_string_table()->AddString(*string);
+ return Utils::ToLocal(string);
}
@@ -5497,22 +5552,23 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
Local<String> v8::String::NewExternal(
- Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
+ Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
LOG_API(i_isolate, "String::NewExternal");
ENTER_V8(i_isolate);
CHECK(resource && resource->data());
- i::Handle<i::String> result =
- NewExternalAsciiStringHandle(i_isolate, resource);
- i_isolate->heap()->external_string_table()->AddString(*result);
- return Utils::ToLocal(result);
+ EXCEPTION_PREAMBLE(i_isolate);
+ i::Handle<i::String> string;
+ has_pending_exception =
+ !NewExternalOneByteStringHandle(i_isolate, resource).ToHandle(&string);
+ EXCEPTION_BAILOUT_CHECK(i_isolate, Local<String>());
+ i_isolate->heap()->external_string_table()->AddString(*string);
+ return Utils::ToLocal(string);
}
bool v8::String::MakeExternal(
- v8::String::ExternalAsciiStringResource* resource) {
+ v8::String::ExternalOneByteStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
if (i::StringShape(*obj).IsExternal()) {
@@ -5553,7 +5609,6 @@ bool v8::String::CanMakeExternal() {
Local<v8::Object> v8::Object::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Object::New()");
LOG_API(i_isolate, "Object::New");
ENTER_V8(i_isolate);
i::Handle<i::JSObject> obj =
@@ -5564,7 +5619,6 @@ Local<v8::Object> v8::Object::New(Isolate* isolate) {
Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::NumberObject::New()");
LOG_API(i_isolate, "NumberObject::New");
ENTER_V8(i_isolate);
i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
@@ -5585,7 +5639,6 @@ double v8::NumberObject::ValueOf() const {
Local<v8::Value> v8::BooleanObject::New(bool value) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
LOG_API(isolate, "BooleanObject::New");
ENTER_V8(isolate);
i::Handle<i::Object> boolean(value
@@ -5610,7 +5663,6 @@ bool v8::BooleanObject::ValueOf() const {
Local<v8::Value> v8::StringObject::New(Handle<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
i::Isolate* isolate = string->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::StringObject::New()");
LOG_API(isolate, "StringObject::New");
ENTER_V8(isolate);
i::Handle<i::Object> obj =
@@ -5631,7 +5683,6 @@ Local<v8::String> v8::StringObject::ValueOf() const {
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()");
LOG_API(i_isolate, "SymbolObject::New");
ENTER_V8(i_isolate);
i::Handle<i::Object> obj = i::Object::ToObject(
@@ -5652,7 +5703,6 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Date::New()");
LOG_API(i_isolate, "Date::New");
if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
@@ -5709,7 +5759,7 @@ static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
- DCHECK(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
+ DCHECK(num_flags <= static_cast<int>(arraysize(flags_buf)));
return isolate->factory()->InternalizeOneByteString(
i::Vector<const uint8_t>(flags_buf, num_flags));
}
@@ -5718,7 +5768,6 @@ static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
Flags flags) {
i::Isolate* isolate = Utils::OpenHandle(*pattern)->GetIsolate();
- EnsureInitializedForIsolate(isolate, "v8::RegExp::New()");
LOG_API(isolate, "RegExp::New");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -5755,7 +5804,6 @@ v8::RegExp::Flags v8::RegExp::GetFlags() const {
Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Array::New()");
LOG_API(i_isolate, "Array::New");
ENTER_V8(i_isolate);
int real_length = length > 0 ? length : 0;
@@ -5815,7 +5863,7 @@ bool Value::IsPromise() const {
isolate,
isolate->is_promise(),
isolate->factory()->undefined_value(),
- ARRAY_SIZE(argv), argv,
+ arraysize(argv), argv,
false).ToHandle(&b);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return b->BooleanValue();
@@ -5856,7 +5904,7 @@ void Promise::Resolver::Resolve(Handle<Value> value) {
isolate,
isolate->promise_resolve(),
isolate->factory()->undefined_value(),
- ARRAY_SIZE(argv), argv,
+ arraysize(argv), argv,
false).is_null();
EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
}
@@ -5873,7 +5921,7 @@ void Promise::Resolver::Reject(Handle<Value> value) {
isolate,
isolate->promise_reject(),
isolate->factory()->undefined_value(),
- ARRAY_SIZE(argv), argv,
+ arraysize(argv), argv,
false).is_null();
EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
}
@@ -5891,7 +5939,7 @@ Local<Promise> Promise::Chain(Handle<Function> handler) {
isolate,
isolate->promise_chain(),
promise,
- ARRAY_SIZE(argv), argv,
+ arraysize(argv), argv,
false).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
return Local<Promise>::Cast(Utils::ToLocal(result));
@@ -5910,7 +5958,7 @@ Local<Promise> Promise::Catch(Handle<Function> handler) {
isolate,
isolate->promise_catch(),
promise,
- ARRAY_SIZE(argv), argv,
+ arraysize(argv), argv,
false).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
return Local<Promise>::Cast(Utils::ToLocal(result));
@@ -5929,7 +5977,7 @@ Local<Promise> Promise::Then(Handle<Function> handler) {
isolate,
isolate->promise_then(),
promise,
- ARRAY_SIZE(argv), argv,
+ arraysize(argv), argv,
false).ToHandle(&result);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
return Local<Promise>::Cast(Utils::ToLocal(result));
@@ -5975,7 +6023,6 @@ size_t v8::ArrayBuffer::ByteLength() const {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(size_t)");
LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
@@ -5988,7 +6035,6 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
@@ -6091,8 +6137,6 @@ i::Handle<i::JSTypedArray> NewTypedArray(
Local<Type##Array> Type##Array::New(Handle<ArrayBuffer> array_buffer, \
size_t byte_offset, size_t length) { \
i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
- EnsureInitializedForIsolate(isolate, \
- "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
LOG_API(isolate, \
"v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
ENTER_V8(isolate); \
@@ -6116,8 +6160,6 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t byte_length) {
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
- EnsureInitializedForIsolate(
- isolate, "v8::DataView::New(void*, size_t, size_t)");
LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)");
ENTER_V8(isolate);
i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView();
@@ -6129,7 +6171,6 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
@@ -6138,49 +6179,62 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
}
-Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
- i::Handle<i::String> part = i_isolate->factory()->for_string();
+static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
+ i::Handle<i::String> name,
+ i::Handle<i::String> part) {
+ i::Handle<i::JSObject> registry = isolate->GetSymbolRegistry();
i::Handle<i::JSObject> symbols =
i::Handle<i::JSObject>::cast(
i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
i::Handle<i::Object> symbol =
- i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
+ i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
if (!symbol->IsSymbol()) {
DCHECK(symbol->IsUndefined());
- symbol = i_isolate->factory()->NewSymbol();
- i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
- i::JSObject::SetProperty(symbols, i_name, symbol, i::STRICT).Assert();
+ symbol = isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
+ i::JSObject::SetProperty(symbols, name, symbol, i::STRICT).Assert();
}
- return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+ return i::Handle<i::Symbol>::cast(symbol);
+}
+
+
+Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::String> part = i_isolate->factory()->for_string();
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
}
Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
i::Handle<i::String> part = i_isolate->factory()->for_api_string();
- i::Handle<i::JSObject> symbols =
- i::Handle<i::JSObject>::cast(
- i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
- i::Handle<i::Object> symbol =
- i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
- if (!symbol->IsSymbol()) {
- DCHECK(symbol->IsUndefined());
- symbol = i_isolate->factory()->NewSymbol();
- i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
- i::JSObject::SetProperty(symbols, i_name, symbol, i::STRICT).Assert();
- }
- return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+}
+
+
+static Local<Symbol> GetWellKnownSymbol(Isolate* isolate, const char* name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name =
+ Utils::OpenHandle(*String::NewFromUtf8(isolate, name));
+ i::Handle<i::String> part = i_isolate->factory()->for_intern_string();
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+}
+
+
+Local<Symbol> v8::Symbol::GetIterator(Isolate* isolate) {
+ return GetWellKnownSymbol(isolate, "Symbol.iterator");
+}
+
+
+Local<Symbol> v8::Symbol::GetUnscopables(Isolate* isolate) {
+ return GetWellKnownSymbol(isolate, "Symbol.unscopables");
}
Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
LOG_API(i_isolate, "Private::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
@@ -6252,7 +6306,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6268,7 +6321,6 @@ bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
void V8::RemoveMessageListeners(MessageCallback that) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::V8::RemoveMessageListener()");
ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6528,9 +6580,29 @@ Isolate* Isolate::GetCurrent() {
}
-Isolate* Isolate::New() {
+Isolate* Isolate::New(const Isolate::CreateParams& params) {
i::Isolate* isolate = new i::Isolate();
- return reinterpret_cast<Isolate*>(isolate);
+ Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (params.entry_hook) {
+ isolate->set_function_entry_hook(params.entry_hook);
+ }
+ if (params.code_event_handler) {
+ isolate->InitializeLoggingAndCounters();
+ isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault,
+ params.code_event_handler);
+ }
+ SetResourceConstraints(isolate, params.constraints);
+ if (params.enable_serializer) {
+ isolate->enable_serializer();
+ }
+ // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
+ Isolate::Scope isolate_scope(v8_isolate);
+ if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
+ // If the isolate has a function entry hook, it needs to re-build all its
+ // code stubs with entry hooks embedded, so don't deserialize a snapshot.
+ isolate->Init(NULL);
+ }
+ return v8_isolate;
}
@@ -6738,6 +6810,34 @@ int v8::Isolate::ContextDisposedNotification() {
}
+void v8::Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ // Ensure that logging is initialized for our isolate.
+ isolate->InitializeLoggingAndCounters();
+ isolate->logger()->SetCodeEventHandler(options, event_handler);
+}
+
+
+void v8::Isolate::SetStackLimit(uintptr_t stack_limit) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ CHECK(stack_limit);
+ isolate->stack_guard()->SetStackLimit(stack_limit);
+}
+
+
+void v8::Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ if (isolate->code_range()->valid()) {
+ *start = isolate->code_range()->start();
+ *length_in_bytes = isolate->code_range()->size();
+ } else {
+ *start = NULL;
+ *length_in_bytes = 0;
+ }
+}
+
+
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@@ -6779,97 +6879,43 @@ String::Value::~Value() {
}
-Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "RangeError");
- ON_BAILOUT(isolate, "v8::Exception::RangeError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "ReferenceError");
- ON_BAILOUT(isolate, "v8::Exception::ReferenceError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result =
- isolate->factory()->NewReferenceError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "SyntaxError");
- ON_BAILOUT(isolate, "v8::Exception::SyntaxError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeError");
- ON_BAILOUT(isolate, "v8::Exception::TypeError()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "Error");
- ON_BAILOUT(isolate, "v8::Exception::Error()", return Local<Value>());
- ENTER_V8(isolate);
- i::Object* error;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = isolate->factory()->NewError(message);
- error = *result;
- }
- i::Handle<i::Object> result(error, isolate);
- return Utils::ToLocal(result);
-}
+#define DEFINE_ERROR(NAME) \
+ Local<Value> Exception::NAME(v8::Handle<v8::String> raw_message) { \
+ i::Isolate* isolate = i::Isolate::Current(); \
+ LOG_API(isolate, #NAME); \
+ ON_BAILOUT(isolate, "v8::Exception::" #NAME "()", return Local<Value>()); \
+ ENTER_V8(isolate); \
+ i::Object* error; \
+ { \
+ i::HandleScope scope(isolate); \
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
+ i::Handle<i::Object> result; \
+ EXCEPTION_PREAMBLE(isolate); \
+ i::MaybeHandle<i::Object> maybe_result = \
+ isolate->factory()->New##NAME(message); \
+ has_pending_exception = !maybe_result.ToHandle(&result); \
+ /* TODO(yangguo): crbug/403509. Return empty handle instead. */ \
+ EXCEPTION_BAILOUT_CHECK( \
+ isolate, v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate))); \
+ error = *result; \
+ } \
+ i::Handle<i::Object> result(error, isolate); \
+ return Utils::ToLocal(result); \
+ }
+
+DEFINE_ERROR(RangeError)
+DEFINE_ERROR(ReferenceError)
+DEFINE_ERROR(SyntaxError)
+DEFINE_ERROR(TypeError)
+DEFINE_ERROR(Error)
+
+#undef DEFINE_ERROR
// --- D e b u g S u p p o r t ---
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6894,6 +6940,12 @@ void Debug::CancelDebugBreak(Isolate* isolate) {
}
+bool Debug::CheckDebugBreak(Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ return internal_isolate->stack_guard()->CheckDebugBreak();
+}
+
+
void Debug::DebugBreakForCommand(Isolate* isolate, ClientData* data) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->EnqueueDebugCommand(data);
@@ -6902,7 +6954,6 @@ void Debug::DebugBreakForCommand(Isolate* isolate, ClientData* data) {
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
ENTER_V8(isolate);
isolate->debug()->SetMessageHandler(handler);
}
@@ -6954,7 +7005,7 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
i::Handle<i::JSObject> debug(
isolate_debug->debug_context()->global_object());
i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("MakeMirror"));
+ STATIC_CHAR_VECTOR("MakeMirror"));
i::Handle<i::Object> fun_obj =
i::Object::GetProperty(debug, name).ToHandleChecked();
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
@@ -6976,7 +7027,6 @@ void Debug::ProcessDebugMessages() {
Local<Context> Debug::GetDebugContext() {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
ENTER_V8(isolate);
return Utils::ToLocal(i::Isolate::Current()->debug()->GetDebugContext());
}
@@ -7529,7 +7579,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
}
List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
- for (unsigned i = 0; i < ARRAY_SIZE(context_lists); i++) {
+ for (unsigned i = 0; i < arraysize(context_lists); i++) {
if (context_lists[i]->is_empty()) continue;
Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
v->VisitPointers(start, start + context_lists[i]->length());
@@ -7613,9 +7663,9 @@ void DeferredHandles::Iterate(ObjectVisitor* v) {
void InvokeAccessorGetterCallback(
- v8::Local<v8::String> property,
+ v8::Local<v8::Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
- v8::AccessorGetterCallback getter) {
+ v8::AccessorNameGetterCallback getter) {
// Leaving JavaScript.
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index c87bd712ef..9aed5dd4e9 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -158,6 +158,7 @@ class RegisteredExtension {
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
+ V(Name, Name) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, JSFunction) \
@@ -189,6 +190,8 @@ class Utils {
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Function> ToLocal(
v8::internal::Handle<v8::internal::JSFunction> obj);
+ static inline Local<Name> ToLocal(
+ v8::internal::Handle<v8::internal::Name> obj);
static inline Local<String> ToLocal(
v8::internal::Handle<v8::internal::String> obj);
static inline Local<Symbol> ToLocal(
@@ -333,6 +336,7 @@ inline v8::Local<T> ToApiHandle(
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
+MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
@@ -671,9 +675,9 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
// Interceptor functions called from generated inline caches to notify
// CPU profiler that external callbacks are invoked.
void InvokeAccessorGetterCallback(
- v8::Local<v8::String> property,
+ v8::Local<v8::Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
- v8::AccessorGetterCallback getter);
+ v8::AccessorNameGetterCallback getter);
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback);
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index dda1d24bab..3e38d10035 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -72,7 +72,7 @@ function InstantiateFunction(data, name) {
}
}
var fun = %CreateApiFunction(data, prototype);
- if (name) %FunctionSetName(fun, name);
+ if (IS_STRING(name)) %FunctionSetName(fun, name);
var doNotCache = flags & (1 << kDoNotCacheBit);
if (!doNotCache) cache[serialNumber] = fun;
ConfigureTemplateInstance(fun, data);
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index bbd2262fd7..9fb2da3bc5 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -68,13 +68,13 @@ class Arguments BASE_EMBEDDED {
// They are used to generate the Call() functions below
// These aren't included in the list as they have duplicate signatures
// F(NamedPropertyEnumeratorCallback, ...)
-// F(NamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
F(IndexedPropertyEnumeratorCallback, v8::Array) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
- F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
+ F(NamedPropertyGetterCallback, v8::Value, v8::Local<v8::String>) \
+ F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
F(NamedPropertyQueryCallback, \
v8::Integer, \
v8::Local<v8::String>) \
@@ -102,9 +102,9 @@ class Arguments BASE_EMBEDDED {
v8::Local<v8::Value>) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
- F(AccessorSetterCallback, \
+ F(AccessorNameSetterCallback, \
void, \
- v8::Local<v8::String>, \
+ v8::Local<v8::Name>, \
v8::Local<v8::Value>) \
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 1cfe34b241..876cd3d1bd 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -70,6 +70,12 @@ int DwVfpRegister::NumAllocatableRegisters() {
}
+// static
+int DwVfpRegister::NumAllocatableAliasedRegisters() {
+ return LowDwVfpRegister::kMaxNumLowRegisters - kNumReservedRegisters;
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
DCHECK(!reg.is(kDoubleRegZero));
DCHECK(!reg.is(kScratchDoubleReg));
@@ -423,36 +429,58 @@ void Assembler::emit(Instr x) {
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
- // Call sequence on V7 or later is :
+ // Call sequence on V7 or later is:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
// blx ip
// @ return address
- // Or pre-V7 or cases that need frequent patching, the address is in the
+ // For V6 when the constant pool is unavailable, it is:
+ // mov ip, #... @ call address low 8
+ // orr ip, ip, #... @ call address 2nd 8
+ // orr ip, ip, #... @ call address 3rd 8
+ // orr ip, ip, #... @ call address high 8
+ // blx ip
+ // @ return address
+ // In cases that need frequent patching, the address is in the
// constant pool. It could be a small constant pool load:
// ldr ip, [pc / pp, #...] @ call address
// blx ip
// @ return address
- // Or an extended constant pool load:
+ // Or an extended constant pool load (ARMv7):
// movw ip, #...
// movt ip, #...
// ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
+ // Or an extended constant pool load (ARMv6):
+ // mov ip, #...
+ // orr ip, ip, #...
+ // orr ip, ip, #...
+ // orr ip, ip, #...
+ // ldr ip, [pc, ip] @ call address
+ // blx ip
+ // @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
if (IsLdrPcImmediateOffset(candidate_instr) |
IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
- } else if (IsLdrPpRegOffset(candidate_instr)) {
- candidate = pc - 4 * Assembler::kInstrSize;
- DCHECK(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
- return candidate;
} else {
- candidate = pc - 3 * Assembler::kInstrSize;
- DCHECK(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + kInstrSize)));
+ if (IsLdrPpRegOffset(candidate_instr)) {
+ candidate -= Assembler::kInstrSize;
+ }
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ candidate -= 1 * Assembler::kInstrSize;
+ DCHECK(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
+ } else {
+ candidate -= 3 * Assembler::kInstrSize;
+ DCHECK(
+ IsMovImmed(Memory::int32_at(candidate)) &&
+ IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
+ }
return candidate;
}
}
@@ -469,14 +497,28 @@ Address Assembler::return_address_from_call_start(Address pc) {
// Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
- DCHECK(IsMovW(Memory::int32_at(pc)));
- DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
- if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) {
- // Load from constant pool, extended section.
- return pc + kInstrSize * 4;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 4;
+ } else {
+ // A movw / movt load immediate.
+ return pc + kInstrSize * 3;
+ }
} else {
- // A movw / movt load immediate.
- return pc + kInstrSize * 3;
+ DCHECK(IsMovImmed(Memory::int32_at(pc)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 6;
+ } else {
+ // A mov / orr load immediate.
+ return pc + kInstrSize * 5;
+ }
}
}
}
@@ -493,10 +535,17 @@ void Assembler::deserialization_set_special_target_at(
bool Assembler::is_constant_pool_load(Address pc) {
- return !Assembler::IsMovW(Memory::int32_at(pc)) ||
- (FLAG_enable_ool_constant_pool &&
- Assembler::IsLdrPpRegOffset(
- Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ return !Assembler::IsMovW(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
+ } else {
+ return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
+ }
}
@@ -505,10 +554,22 @@ Address Assembler::constant_pool_entry_address(
if (FLAG_enable_ool_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
- if (IsMovW(Memory::int32_at(pc))) {
+ if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
+ IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
+ // This is an extended constant pool lookup (ARMv6).
+ Instr mov_instr = instr_at(pc);
+ Instr orr_instr_1 = instr_at(pc + kInstrSize);
+ Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+ Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+ cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+ DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
+ } else if (IsMovW(Memory::int32_at(pc))) {
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
- // This is an extended constant pool lookup.
+ // This is an extended constant pool lookup (ARMv7).
Instruction* movw_instr = Instruction::At(pc);
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
@@ -532,8 +593,8 @@ Address Assembler::target_address_at(Address pc,
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
- } else {
- // This is an movw_movt immediate load. Return the immediate.
+ } else if (CpuFeatures::IsSupported(ARMv7)) {
+ // This is an movw / movt immediate load. Return the immediate.
DCHECK(IsMovW(Memory::int32_at(pc)) &&
IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* movw_instr = Instruction::At(pc);
@@ -541,6 +602,20 @@ Address Assembler::target_address_at(Address pc,
return reinterpret_cast<Address>(
(movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue());
+ } else {
+ // This is an mov / orr immediate load. Return the immediate.
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ Instr mov_instr = instr_at(pc);
+ Instr orr_instr_1 = instr_at(pc + kInstrSize);
+ Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+ Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+ Address ret = reinterpret_cast<Address>(
+ DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+ DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
+ return ret;
}
}
@@ -560,9 +635,9 @@ void Assembler::set_target_address_at(Address pc,
// ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
- } else {
- // This is an movw_movt immediate load. Patch the immediate embedded in the
- // instructions.
+ } else if (CpuFeatures::IsSupported(ARMv7)) {
+ // This is an movw / movt immediate load. Patch the immediate embedded in
+ // the instructions.
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -574,6 +649,26 @@ void Assembler::set_target_address_at(Address pc,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
CpuFeatures::FlushICache(pc, 2 * kInstrSize);
}
+ } else {
+ // This is an mov / orr immediate load. Patch the immediate embedded in
+ // the instructions.
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
+ instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
+ instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
+ instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(pc, 4 * kInstrSize);
+ }
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 1a2f5d6e5d..96f28f9683 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -39,6 +39,7 @@
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
+#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
@@ -435,6 +436,10 @@ const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
+const Instr kMovImmedMask = 0x7f * B21;
+const Instr kMovImmedPattern = 0x1d * B21;
+const Instr kOrrImmedMask = 0x7f * B21;
+const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
@@ -494,7 +499,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -1052,9 +1057,6 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
if (assembler != NULL && !assembler->is_constant_pool_available()) {
- // If there is no constant pool available, we must use an mov immediate.
- // TODO(rmcilroy): enable ARMv6 support.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@@ -1081,11 +1083,14 @@ int Operand::instructions_required(const Assembler* assembler,
// for the constant pool or immediate load
int instructions;
if (use_mov_immediate_load(*this, assembler)) {
- instructions = 2; // A movw, movt immediate load.
+ // A movw / movt or mov / orr immediate load.
+ instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
} else if (assembler != NULL && assembler->use_extended_constant_pool()) {
- instructions = 3; // An extended constant pool load.
+ // An extended constant pool load.
+ instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
- instructions = 1; // A small constant pool load.
+ // A small constant pool load.
+ instructions = 1;
}
if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
@@ -1107,21 +1112,27 @@ void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
}
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
- // TODO(rmcilroy): add ARMv6 support for immediate loads.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
- if (!FLAG_enable_ool_constant_pool &&
- x.must_output_reloc_info(this)) {
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ movw(target, imm32 & 0xffff, cond);
+ movt(target, imm32 >> 16, cond);
+ } else {
+ DCHECK(FLAG_enable_ool_constant_pool);
+ mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
+ orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
+ orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
+ orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
}
- movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond);
- movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
@@ -1132,8 +1143,15 @@ void Assembler::move_32_bit_immediate(Register rd,
DCHECK(FLAG_enable_ool_constant_pool);
Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset.
- movw(target, 0, cond);
- movt(target, 0, cond);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ movw(target, 0, cond);
+ movt(target, 0, cond);
+ } else {
+ mov(target, Operand(0), LeaveCC, cond);
+ orr(target, target, Operand(0), LeaveCC, cond);
+ orr(target, target, Operand(0), LeaveCC, cond);
+ orr(target, target, Operand(0), LeaveCC, cond);
+ }
// Load from constant pool at offset.
ldr(rd, MemOperand(pp, target), cond);
} else {
@@ -3147,6 +3165,23 @@ Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
}
+int Assembler::DecodeShiftImm(Instr instr) {
+ int rotate = Instruction::RotateValue(instr) * 2;
+ int immed8 = Instruction::Immed8Value(instr);
+ return (immed8 >> rotate) | (immed8 << (32 - rotate));
+}
+
+
+Instr Assembler::PatchShiftImm(Instr instr, int immed) {
+ uint32_t rotate_imm = 0;
+ uint32_t immed_8 = 0;
+ bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
+ DCHECK(immed_fits);
+ USE(immed_fits);
+ return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type.
@@ -3154,6 +3189,16 @@ bool Assembler::IsNop(Instr instr, int type) {
}
+bool Assembler::IsMovImmed(Instr instr) {
+ return (instr & kMovImmedMask) == kMovImmedPattern;
+}
+
+
+bool Assembler::IsOrrImmed(Instr instr) {
+ return (instr & kOrrImmedMask) == kOrrImmedPattern;
+}
+
+
// static
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
@@ -3735,17 +3780,46 @@ void ConstantPoolBuilder::Populate(Assembler* assm,
// Patch vldr/ldr instruction with correct offset.
Instr instr = assm->instr_at(rinfo.pc());
if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
- // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
- Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
- DCHECK((Assembler::IsMovW(instr) &&
- Instruction::ImmedMovwMovtValue(instr) == 0));
- DCHECK((Assembler::IsMovT(next_instr) &&
- Instruction::ImmedMovwMovtValue(next_instr) == 0));
- assm->instr_at_put(rinfo.pc(),
- Assembler::PatchMovwImmediate(instr, offset & 0xffff));
- assm->instr_at_put(
- rinfo.pc() + Assembler::kInstrSize,
- Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
+ Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+ DCHECK((Assembler::IsMovW(instr) &&
+ Instruction::ImmedMovwMovtValue(instr) == 0));
+ DCHECK((Assembler::IsMovT(next_instr) &&
+ Instruction::ImmedMovwMovtValue(next_instr) == 0));
+ assm->instr_at_put(
+ rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
+ assm->instr_at_put(
+ rinfo.pc() + Assembler::kInstrSize,
+ Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+ } else {
+ // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
+ Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+ Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
+ Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
+ DCHECK((Assembler::IsMovImmed(instr) &&
+ Instruction::Immed8Value(instr) == 0));
+ DCHECK((Assembler::IsOrrImmed(instr_2) &&
+ Instruction::Immed8Value(instr_2) == 0) &&
+ Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
+ DCHECK((Assembler::IsOrrImmed(instr_3) &&
+ Instruction::Immed8Value(instr_3) == 0) &&
+ Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
+ DCHECK((Assembler::IsOrrImmed(instr_4) &&
+ Instruction::Immed8Value(instr_4) == 0) &&
+ Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
+ assm->instr_at_put(
+ rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
+ assm->instr_at_put(
+ rinfo.pc() + Assembler::kInstrSize,
+ Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
+ assm->instr_at_put(
+ rinfo.pc() + 2 * Assembler::kInstrSize,
+ Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
+ assm->instr_at_put(
+ rinfo.pc() + 3 * Assembler::kInstrSize,
+ Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
+ }
} else if (type == ConstantPoolArray::INT64) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index e33f48a05b..e8ee605988 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -218,6 +218,11 @@ struct DwVfpRegister {
inline static int NumReservedRegisters();
inline static int NumAllocatableRegisters();
+ // TODO(turbofan): This is a temporary work-around required because our
+ // register allocator does not yet support the aliasing of single/double
+ // registers on ARM.
+ inline static int NumAllocatableAliasedRegisters();
+
inline static int ToAllocationIndex(DwVfpRegister reg);
static const char* AllocationIndexToString(int index);
inline static DwVfpRegister FromAllocationIndex(int index);
@@ -1449,12 +1454,16 @@ class Assembler : public AssemblerBase {
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+ static bool IsMovImmed(Instr instr);
+ static bool IsOrrImmed(Instr instr);
static bool IsMovT(Instr instr);
static Instr GetMovTPattern();
static bool IsMovW(Instr instr);
static Instr GetMovWPattern();
static Instr EncodeMovwImmediate(uint32_t immediate);
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
+ static int DecodeShiftImm(Instr instr);
+ static Instr PatchShiftImm(Instr instr, int immed);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 60055a6cd3..db7033d189 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -10,8 +10,7 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -808,8 +807,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
@@ -1422,13 +1421,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
- if (FLAG_enable_ool_constant_pool) {
- __ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
- __ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
- } else {
- __ mov(sp, fp);;
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- }
+ __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index a728d58fbf..be0532efd9 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -6,348 +6,86 @@
#if V8_TARGET_ARCH_ARM
+#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r2 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r1 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r3, r2, r1 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r3, r2, r1, r0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r2, r3 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // r1 function the function to call
- Register registers[] = {cp, r1};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // r0 : number of arguments
- // r1 : the function to call
- // r2 : feedback vector
- // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, r0, r1, r2};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r2, r1, r0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r0, r1 };
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(entry));
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return cp; }
-
-
static void InitializeArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // cp -- context
- // r0 -- number of arguments
- // r1 -- function
- // r2 -- allocation site with elements kind
Address deopt_handler = Runtime::FunctionForId(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, r1, r2 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, r1, r2, r0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // cp -- context
- // r0 -- number of arguments
- // r1 -- constructor function
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, r1 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, r1, r0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r1, r0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r2, r1, r0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, r1, r0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- static PlatformInterfaceDescriptor noInlineDescriptor =
- PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { cp, // context
- r1, // JSFunction
- r0, // actual number of arguments
- r2, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { cp, // context
- r2, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &noInlineDescriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { cp, // context
- r2, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &noInlineDescriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { cp, // context
- r0, // receiver
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { cp, // context
- r0, // callee
- r4, // call_data
- r2, // holder
- r1, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
@@ -368,23 +106,22 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- r0.is(descriptor->GetEnvironmentParameterRegister(
- param_count - 1)));
+ r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetEnvironmentParameterRegister(i));
}
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -392,101 +129,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Isolate* isolate,
- Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : PlatformCodeStub(isolate),
- result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() const { return ConvertToDouble; }
- int MinorKey() const {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- Register exponent = result1_;
- Register mantissa = result2_;
-
- Label not_special;
- __ SmiUntag(source_);
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand::Zero());
- __ Ret();
-
- __ bind(&not_special);
- __ clz(zeros_, source_);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@@ -608,29 +250,29 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
+ __ cmp(the_int(), Operand(0x80000000u));
__ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch_, Operand(non_smi_exponent));
+ __ mov(scratch(), Operand(non_smi_exponent));
// Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+ __ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
// Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
+ __ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
// We should be masking the implict first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
+ __ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
+ __ str(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+ __ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
+ __ str(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
__ Ret();
__ bind(&max_negative_int);
@@ -640,9 +282,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// significant 1 bit is not stored.
non_smi_exponent += 1 << HeapNumber::kExponentShift;
__ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
__ mov(ip, Operand::Zero());
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
__ Ret();
}
@@ -923,15 +565,14 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
Register scratch,
- CompareIC::State expected,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
DONT_DO_SMI_CHECK);
@@ -945,14 +586,14 @@ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
// On entry r1 and r2 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = r1;
Register rhs = r0;
Condition cc = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -1048,29 +689,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
masm, lhs, rhs, &flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
r3);
if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- r2,
- r3,
- r4);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- r2,
- r3,
- r4,
- r5);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
+ r5);
}
// Never falls through to here.
@@ -1111,7 +742,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
const Register scratch = r1;
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ SaveFPRegs(sp, scratch);
}
const int argument_count = 1;
@@ -1123,7 +754,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ RestoreFPRegs(sp, scratch);
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
@@ -1132,7 +763,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
- const Register exponent = r2;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(r2));
const Register heapnumbermap = r5;
const Register heapnumber = r0;
const DwVfpRegister double_base = d0;
@@ -1144,7 +776,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
@@ -1174,7 +806,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ b(ne, &call_runtime);
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -1182,7 +814,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ vcvt_u32_f64(single_scratch, double_exponent);
@@ -1192,7 +824,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ VFPCompareAndSetFlags(double_scratch, double_exponent);
__ b(eq, &int_exponent_convert);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
@@ -1257,7 +889,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
+ if (exponent_type() == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
@@ -1293,7 +925,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Returning or bailing out.
Counters* counters = isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
@@ -1346,20 +978,10 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(isolate, 1, mode);
- StoreBufferOverflowStub stub(isolate, mode);
- // These stubs might already be in the snapshot, detect that and don't
- // regenerate, which would lead to code stub initialization state being messed
- // up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
- save_doubles_code = *save_doubles.GetCode();
- }
- Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
- store_buffer_overflow_code = *stub.GetCode();
- }
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
@@ -1388,7 +1010,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
+ __ EnterExitFrame(save_doubles());
// Store a copy of argc in callee-saved registers for later.
__ mov(r4, Operand(r0));
@@ -1405,7 +1027,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -1472,7 +1094,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer
// fp: frame pointer
// Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4, true);
+ __ LeaveExitFrame(save_doubles(), r4, true);
__ mov(pc, lr);
// Handling of exception.
@@ -1500,7 +1122,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
// r0: code entry
// r1: function
// r2: receiver
@@ -1539,7 +1161,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// r4: argv
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
if (FLAG_enable_ool_constant_pool) {
__ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
}
@@ -1620,7 +1242,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// r4: argv
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
__ mov(ip, Operand(construct_entry));
@@ -1871,7 +1493,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
r4, &miss);
@@ -1881,17 +1503,13 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() { return r0; }
-
-
-Register InstanceofStub::right() { return r1; }
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smi.
Label slow;
@@ -2167,6 +1785,32 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+ __ NonNegativeSmiTst(key);
+ __ b(ne, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
@@ -2423,7 +2067,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
+ ne);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// (E) Carry on. String handling is done.
@@ -2434,7 +2079,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(r6, &runtime);
// r1: previous index
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
// r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -2477,7 +2122,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for ASCII and 1 for two byte).
+ // calculate the shift of the index (0 for one-byte and 1 for two-byte).
__ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
@@ -2697,9 +2342,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into r4.
@@ -2893,7 +2538,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -2974,7 +2619,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id
Label miss;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, r2);
@@ -2997,7 +2642,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -3016,7 +2661,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, r2);
@@ -3028,7 +2673,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ b(ne, &extra_checks_or_miss);
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Compute the receiver in sloppy mode.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
@@ -3045,7 +2690,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(masm, argc, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -3072,7 +2717,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -3087,9 +2732,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Get the receiver of the function from the stack; 1 ~ return address.
- __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+ __ ldr(r4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -3098,6 +2743,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ Push(r4, r1, r2, r3);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -3110,11 +2758,6 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
// If the receiver is a smi trigger the non-string case.
__ JumpIfSmi(object_, receiver_not_string_);
@@ -3206,14 +2849,14 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one-byte char code.
__ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
@@ -3239,10 +2882,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
@@ -3281,48 +2921,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, LSR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15));
-
- __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -3445,7 +3043,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
+ __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
@@ -3488,8 +3086,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
__ add(r5, r5, r3);
@@ -3548,12 +3146,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Register length = scratch1;
// Compare lengths.
@@ -3577,9 +3172,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3,
- &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+ &strings_not_equal);
// Characters are equal.
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
@@ -3587,13 +3181,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
@@ -3607,9 +3197,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ b(eq, &compare_lengths);
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
@@ -3625,14 +3214,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
@@ -3678,13 +3262,13 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first.
+ // Compare flat one-byte strings natively. Remove arguments from stack first.
__ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -3719,13 +3303,13 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -3745,17 +3329,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(r1, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(r0, &miss);
}
@@ -3797,12 +3381,12 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
__ JumpIfSmi(r1, &unordered);
@@ -3812,7 +3396,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
__ b(eq, &unordered);
}
@@ -3822,8 +3406,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -3860,8 +3444,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
DCHECK(GetCondition() == eq);
Label miss;
@@ -3881,8 +3465,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss);
- __ JumpIfNotUniqueName(tmp2, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
// Unique names are compared by identity.
__ cmp(left, right);
@@ -3899,11 +3483,11 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = r1;
@@ -3950,18 +3534,18 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ Ret(eq);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+ tmp3);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
@@ -3978,8 +3562,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -3998,7 +3582,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -4017,8 +3601,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
}
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
@@ -4027,7 +3610,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
@@ -4115,7 +3698,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
@@ -4280,12 +3863,12 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ cmp(entry_key, Operand(key));
__ b(eq, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -4293,7 +3876,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ mov(result, Operand::Zero());
__ Ret();
}
@@ -4339,11 +3922,8 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ b(&skip_to_incremental_compacting);
}
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
}
__ Ret();
@@ -4366,7 +3946,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -4386,10 +3966,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -4404,7 +3981,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
@@ -4420,7 +3997,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4448,10 +4025,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -4492,10 +4066,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -4578,7 +4149,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ ldr(r1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ add(r1, r1, Operand(1));
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4588,6 +4159,20 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4632,7 +4217,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(r5, sp);
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
__ and_(sp, sp, Operand(-frame_alignment));
}
@@ -4808,7 +4393,7 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
@@ -4821,11 +4406,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -4835,7 +4420,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : argc (only if argument_count_ == ANY)
+ // -- r0 : argc (only if argument_count() == ANY)
// -- r1 : constructor
// -- r2 : AllocationSite or undefined
// -- sp[0] : return address
@@ -4969,9 +4554,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = r1;
Register context = cp;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -5068,7 +4653,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- r2 : api_function_address
// -----------------------------------
- Register api_function_address = r2;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(r2));
__ mov(r0, sp); // r0 = Handle<Name>
__ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index ff2a80e676..727bb1b227 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
-#include "src/ic-inl.h"
-
namespace v8 {
namespace internal {
@@ -14,24 +12,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
@@ -45,71 +25,24 @@ class StringHelper : public AllStatic {
Register scratch,
String::Encoding encoding);
+ // Compares two flat one-byte strings and returns result in r0.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- // Compares two flat ASCII strings and returns result in r0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
+ // Compares two flat one-byte strings for equality and returns result in r0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
Register scratch1,
Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in r0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
+ Register scratch3);
private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -118,36 +51,36 @@ class StringCompareStub: public PlatformCodeStub {
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Isolate* isolate,
- Register the_int,
- Register the_heap_number,
- Register scratch)
- : PlatformCodeStub(isolate),
- the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
+ WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+ Register the_heap_number, Register scratch)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = IntRegisterBits::encode(the_int.code()) |
+ HeapNumberRegisterBits::encode(the_heap_number.code()) |
+ ScratchRegisterBits::encode(scratch.code());
+ }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
+ Register the_int() const {
+ return Register::from_code(IntRegisterBits::decode(minor_key_));
+ }
+
+ Register the_heap_number() const {
+ return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+ }
+
+ Register scratch() const {
+ return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+ }
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
- Major MajorKey() const { return WriteInt32ToHeapNumber; }
- int MinorKey() const {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};
@@ -160,16 +93,19 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -233,6 +169,8 @@ class RecordWriteStub: public PlatformCodeStub {
2 * Assembler::kInstrSize);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
@@ -297,7 +235,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- void Generate(MacroAssembler* masm);
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -305,18 +245,28 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 4> {};
@@ -325,13 +275,10 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
@@ -343,14 +290,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() const { return DirectCEntry; }
- int MinorKey() const { return 0; }
-
bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
@@ -359,9 +305,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -393,29 +339,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
-
- int MinorKey() const { return LookupModeBits::encode(mode_); }
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-
-class PlatformInterfaceDescriptor {
- public:
- explicit PlatformInterfaceDescriptor(
- TargetAddressStorageMode storage_mode)
- : storage_mode_(storage_mode) { }
-
- TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
- TargetAddressStorageMode storage_mode_;
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index cdc40a48cf..d050399532 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -759,16 +759,16 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, call_runtime);
__ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
- Label ascii, done;
+ Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii);
+ __ b(ne, &one_byte);
// Two-byte string.
__ ldrh(result, MemOperand(string, index, LSL, 1));
__ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
+ __ bind(&one_byte);
+ // One-byte string.
__ ldrb(result, MemOperand(string, index));
__ bind(&done);
}
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 9ec09583d9..4c7c7688fd 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -6,7 +6,7 @@
#define V8_ARM_CODEGEN_ARM_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index c4e559d6c1..375ef89774 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -564,7 +564,9 @@ class Instruction {
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
inline int RotateValue() const { return Bits(11, 8); }
+ DECLARE_STATIC_ACCESSOR(RotateValue);
inline int Immed8Value() const { return Bits(7, 0); }
+ DECLARE_STATIC_ACCESSOR(Immed8Value);
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index ec98a7ed36..6d7d6b8ace 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -176,17 +176,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
@@ -200,9 +200,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index df2c098456..0455a3ba67 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -101,7 +101,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 09459e4e35..42a605c7e9 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_ARM
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/arm/code-stubs-arm.h"
#include "src/arm/macro-assembler-arm.h"
@@ -346,7 +347,11 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
}
+#ifdef CAN_USE_ARMV7_INSTRUCTIONS
static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
+#else
+static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
+#endif
void FullCodeGenerator::EmitProfilingCounterReset() {
@@ -361,10 +366,13 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
- // The mov instruction above can be either 1, 2 or 3 instructions depending
- // upon whether it is an extended constant pool - insert nop to compensate.
- DCHECK(masm_->InstructionsGeneratedSince(&start) <= 3);
- while (masm_->InstructionsGeneratedSince(&start) != 3) {
+ // The mov instruction above can be either 1 to 3 (for ARMv7) or 1 to 5
+ // instructions (for ARMv6) depending upon whether it is an extended constant
+ // pool - insert nop to compensate.
+ int expected_instr_count =
+ (kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
+ DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
+ while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
__ nop();
}
__ mov(r3, Operand(Smi::FromInt(reset_value)));
@@ -448,9 +456,11 @@ void FullCodeGenerator::EmitReturnSequence() {
PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
#ifdef DEBUG
@@ -1045,7 +1055,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1182,7 +1193,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
__ Move(r1, FeedbackVector());
- __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@@ -1324,9 +1335,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1346,6 +1355,25 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ cmp(r0, Operand(isolate()->factory()->undefined_value()));
+ Label done;
+ __ b(ne, &done);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1394,10 +1422,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), Operand(proxy->var()->name()));
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
@@ -1483,10 +1511,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), Operand(var->name()));
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
@@ -1694,9 +1722,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreIC::ValueRegister().is(r0));
- __ mov(StoreIC::NameRegister(), Operand(key->value()));
- __ ldr(StoreIC::ReceiverRegister(), MemOperand(sp));
+ DCHECK(StoreDescriptor::ValueRegister().is(r0));
+ __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1857,13 +1885,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1875,17 +1909,29 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = r1;
+ __ ldr(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch);
+ __ Push(result_register());
+ }
+ break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
- __ ldr(LoadIC::NameRegister(), MemOperand(sp, 0));
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1906,6 +1952,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1952,6 +2002,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1966,12 +2019,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
@@ -2003,7 +2056,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
@@ -2015,7 +2068,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -2024,8 +2077,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -2080,10 +2133,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(load_receiver, MemOperand(sp, kPointerSize));
__ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
@@ -2100,7 +2153,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
@@ -2113,7 +2166,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
@@ -2290,9 +2343,11 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ mov(LoadIC::NameRegister(), Operand(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -2301,11 +2356,23 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallIC(ic);
} else {
@@ -2336,8 +2403,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpICStub stub(isolate(), op, mode);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2412,9 +2479,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -2444,9 +2511,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ Move(StoreIC::ReceiverRegister(), r0);
- __ pop(StoreIC::ValueRegister()); // Restore value.
- __ mov(StoreIC::NameRegister(),
+ __ Move(StoreDescriptor::ReceiverRegister(), r0);
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
@@ -2455,11 +2522,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ push(r0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(KeyedStoreIC::NameRegister(), r0);
- __ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Move(StoreDescriptor::NameRegister(), r0);
+ __ Pop(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2484,8 +2551,8 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreIC::NameRegister(), Operand(var->name()));
- __ ldr(StoreIC::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2557,8 +2624,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2566,17 +2634,33 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // r0 : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(r0);
+ __ Push(key->value());
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(r0));
+ __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(r0));
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2589,16 +2673,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), r0);
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), r0);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadIC::NameRegister(), r0);
- __ pop(LoadIC::ReceiverRegister());
+ __ Move(LoadDescriptor::NameRegister(), r0);
+ __ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(r0);
}
@@ -2619,12 +2710,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2635,7 +2725,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2648,6 +2739,45 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = r1;
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(r0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(r0);
+ __ Push(r0);
+ __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
+ __ Push(scratch);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ str(r0, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2658,8 +2788,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
- __ Move(LoadIC::NameRegister(), r0);
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), r0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2668,11 +2798,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2817,13 +2947,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+ // super.x() is handled in EmitCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
} else {
DCHECK(call_type == Call::OTHER_CALL);
@@ -3318,7 +3455,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
+ __ LoadRoot(r0, Heap::kFunction_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3436,9 +3573,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = r1;
Register value = r2;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value);
if (FLAG_debug_code) {
@@ -3469,9 +3606,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = r1;
Register value = r2;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value);
if (FLAG_debug_code) {
@@ -3812,7 +3949,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
@@ -3859,7 +3996,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
array = no_reg; // End of array's live range.
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ mov(string_length, Operand::Zero());
__ add(element,
@@ -3875,14 +4012,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements_end: Array end.
if (generate_debug_code_) {
__ cmp(array_length, Operand::Zero());
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
__ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
__ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
@@ -3903,11 +4040,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (smi).
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
__ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
@@ -3936,12 +4073,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch,
- string, // used as scratch
- elements_end, // used as scratch
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch,
+ string, // used as scratch
+ elements_end, // used as scratch
+ &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
@@ -3980,7 +4115,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3991,7 +4126,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
+ // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
__ strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -4072,15 +4207,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
__ ldr(receiver, GlobalObjectOperand());
__ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver);
// Load the function from the receiver.
- __ mov(LoadIC::NameRegister(), Operand(expr->name()));
+ __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -4247,6 +4382,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -4263,13 +4403,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
- __ ldr(LoadIC::NameRegister(), MemOperand(sp, 0));
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
}
}
@@ -4349,8 +4490,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4377,9 +4519,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreIC::NameRegister(),
+ __ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreIC::ReceiverRegister());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4392,10 +4534,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4417,10 +4559,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), Operand(proxy->name()));
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4585,7 +4727,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4785,14 +4927,35 @@ static Address GetInterruptImmediateLoadAddress(Address pc) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
} else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
// This is an extended constant pool lookup.
- load_address -= 2 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsMovT(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- } else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
- // This is a movw_movt immediate load.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ load_address -= 2 * Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
+ DCHECK(Assembler::IsMovT(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
+ } else {
+ load_address -= 4 * Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
+ }
+ } else if (CpuFeatures::IsSupported(ARMv7) &&
+ Assembler::IsMovT(Memory::int32_at(load_address))) {
+ // This is a movw / movt immediate load.
load_address -= Assembler::kInstrSize;
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
+ } else if (!CpuFeatures::IsSupported(ARMv7) &&
+ Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
+ // This is a mov / orr immediate load.
+ load_address -= 3 * Assembler::kInstrSize;
+ DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
+ DCHECK(Assembler::IsOrrImmed(
+ Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
} else {
// This is a small constant pool lookup.
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
@@ -4813,11 +4976,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
{
// <decrement profiling counter>
// bpl ok
- // ; load interrupt stub address into ip - either of:
+ // ; load interrupt stub address into ip - either of (for ARMv7):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
- // | movt ip, #imm> | movw ip, #imm
+ // | movt ip, #imm | movw ip, #imm
// | ldr ip, [pp, ip]
+ // ; or (for ARMv6):
+ // ; <small cp load> | <extended cp load> | <immediate load>
+ // ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
+ // | orr ip, ip, #imm> | orr ip, ip, #imm
+ // | orr ip, ip, #imm> | orr ip, ip, #imm
+ // | orr ip, ip, #imm> | orr ip, ip, #imm
// blx ip
// <reset profiling counter>
// ok-label
@@ -4834,11 +5003,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// mov r0, r0 (NOP)
- // ; load on-stack replacement address into ip - either of:
+ // ; load on-stack replacement address into ip - either of (for ARMv7):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
// | movt ip, #imm> | movw ip, #imm
// | ldr ip, [pp, ip]
+ // ; or (for ARMv6):
+ // ; <small cp load> | <extended cp load> | <immediate load>
+ // ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
+ // | orr ip, ip, #imm> | orr ip, ip, #imm
+ // | orr ip, ip, #imm> | orr ip, ip, #imm
+ // | orr ip, ip, #imm> | orr ip, ip, #imm
// blx ip
// <reset profiling counter>
// ok-label
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
new file mode 100644
index 0000000000..9bbc1f58c1
--- /dev/null
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -0,0 +1,323 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return r1; }
+const Register LoadDescriptor::NameRegister() { return r2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return r1; }
+const Register StoreDescriptor::NameRegister() { return r2; }
+const Register StoreDescriptor::ValueRegister() { return r0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
+
+
+const Register InstanceofDescriptor::left() { return r0; }
+const Register InstanceofDescriptor::right() { return r1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return r1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return r2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r2, r1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r2, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the function to call
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, r0, r1, r2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // r0 -- number of arguments
+ // r1 -- function
+ // r2 -- allocation site with elements kind
+ Register registers[] = {cp, r1, r2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, r1, r2, r0};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // r0 -- number of arguments
+ // r1 -- constructor function
+ Register registers[] = {cp, r1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, r1, r0};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r1, // JSFunction
+ r0, // actual number of arguments
+ r2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r0, // callee
+ r4, // call_data
+ r2, // holder
+ r1, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/interface-descriptors-arm.h b/deps/v8/src/arm/interface-descriptors-arm.h
new file mode 100644
index 0000000000..6201adc685
--- /dev/null
+++ b/deps/v8/src/arm/interface-descriptors-arm.h
@@ -0,0 +1,26 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
+#define V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor {
+ public:
+ explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) {}
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 6b86088ee7..13a46a2b54 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -423,12 +423,6 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -1082,14 +1076,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1099,6 +1093,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
@@ -1678,9 +1685,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d1) :
- UseFixed(instr->right(), r2);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d1)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d2),
instr,
@@ -2102,11 +2110,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2161,10 +2169,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2226,11 +2235,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2286,9 +2296,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2345,7 +2356,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier || instr->field_representation().IsSmi()) {
+ if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
@@ -2362,8 +2373,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
@@ -2439,10 +2451,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2458,7 +2470,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
@@ -2555,6 +2567,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 16f522e5b6..f9feaf6de2 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -154,6 +154,7 @@ class LCodeGen;
V(SubI) \
V(RSubI) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -166,11 +167,11 @@ class LCodeGen;
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -290,7 +291,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -312,11 +313,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
@@ -331,8 +332,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -368,11 +369,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -380,14 +381,14 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -396,7 +397,7 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -412,14 +413,14 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -428,25 +429,25 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -462,14 +463,14 @@ class LLabel V8_FINAL : public LGap {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -482,9 +483,29 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -496,7 +517,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -535,7 +556,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -550,7 +571,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -571,7 +592,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -585,11 +606,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -601,14 +622,14 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -626,7 +647,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -644,7 +665,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
@@ -663,7 +684,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -681,7 +702,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -699,7 +720,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -716,7 +737,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -735,7 +756,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -755,7 +776,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -772,7 +793,7 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -788,7 +809,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -806,7 +827,7 @@ class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplySubD FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplySubD(LOperand* minuend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -823,13 +844,13 @@ class LMultiplySubD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -848,11 +869,11 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -865,7 +886,7 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -880,7 +901,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
@@ -890,7 +911,7 @@ class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -905,7 +926,7 @@ class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -917,7 +938,7 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
@@ -929,7 +950,7 @@ class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -951,7 +972,7 @@ class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -963,7 +984,7 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
@@ -975,7 +996,7 @@ class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -990,7 +1011,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -1003,7 +1024,7 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1019,7 +1040,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1032,11 +1053,11 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1049,11 +1070,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1064,11 +1085,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1082,11 +1103,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1104,11 +1125,11 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1120,11 +1141,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1137,7 +1158,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1150,11 +1171,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1168,11 +1189,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1191,7 +1212,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1207,7 +1228,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1228,7 +1249,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1237,7 +1258,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1252,7 +1273,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1269,7 +1290,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1290,7 +1311,7 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1305,7 +1326,7 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LRSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LRSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1320,7 +1341,7 @@ class LRSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1329,7 +1350,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1338,7 +1359,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1347,7 +1368,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1358,7 +1379,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1369,7 +1390,7 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1380,11 +1401,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1401,7 +1422,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1413,7 +1434,7 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1432,7 +1453,7 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -1447,7 +1468,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -1468,7 +1489,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1483,7 +1504,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1498,7 +1519,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1513,7 +1534,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1525,18 +1546,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1553,18 +1574,18 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1587,7 +1608,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1600,7 +1621,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1619,7 +1640,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1632,7 +1653,7 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1641,7 +1662,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1666,12 +1687,12 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* vector) {
@@ -1691,14 +1712,14 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1719,7 +1740,7 @@ class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1734,7 +1755,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1747,11 +1768,11 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1766,11 +1787,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1782,7 +1803,7 @@ class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1795,7 +1816,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
@@ -1812,7 +1833,7 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1822,27 +1843,27 @@ class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1855,7 +1876,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -1866,48 +1887,47 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
- const InterfaceDescriptor* descriptor() { return descriptor_; }
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- const InterfaceDescriptor* descriptor_;
+ CallInterfaceDescriptor descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1920,13 +1940,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1943,7 +1963,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1956,13 +1976,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1975,13 +1995,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -1992,7 +2012,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2002,7 +2022,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2014,7 +2034,7 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2026,7 +2046,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2042,7 +2062,7 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2058,7 +2078,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2075,7 +2095,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2091,7 +2111,7 @@ class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -2107,7 +2127,7 @@ class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
@@ -2128,7 +2148,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2141,7 +2161,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2154,7 +2174,7 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2171,7 +2191,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2186,7 +2206,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2194,7 +2214,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2209,14 +2229,14 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2241,7 +2261,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
bool NeedsCanonicalization() {
if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
@@ -2253,7 +2273,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
@@ -2273,13 +2293,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2297,7 +2317,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2308,7 +2328,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2324,7 +2344,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2342,7 +2362,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2359,7 +2379,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2374,7 +2394,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -2387,7 +2407,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2400,7 +2420,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
@@ -2413,7 +2433,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2425,7 +2445,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2438,7 +2458,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2450,7 +2470,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2462,7 +2482,7 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2476,7 +2496,7 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -2489,7 +2509,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -2503,7 +2523,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
LOperand* size,
@@ -2525,7 +2545,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2538,7 +2558,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2551,7 +2571,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2564,7 +2584,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2578,7 +2598,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2591,11 +2611,11 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2608,18 +2628,18 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2637,7 +2657,7 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2651,7 +2671,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2667,7 +2687,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2681,7 +2701,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2725,7 +2745,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2735,20 +2755,14 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator) { }
-
- Isolate* isolate() const { return graph_->isolate(); }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2784,24 +2798,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2843,7 +2839,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2886,10 +2882,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index ff09e287f7..14740965bf 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -6,15 +6,18 @@
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -24,9 +27,9 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -49,11 +52,8 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
@@ -310,7 +310,7 @@ bool LCodeGen::GenerateDeferredCode() {
}
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
// Check that the jump table is accessible from everywhere in the function
// code, i.e. that offsets to the table can be encoded in the 24bit signed
// immediate of a branch instruction.
@@ -319,35 +319,33 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 7)) {
+ jump_table_.length() * 7)) {
Abort(kGeneratedCodeIsTooLarge);
}
- if (deopt_jump_table_.length() > 0) {
+ if (jump_table_.length() > 0) {
Label needs_frame, call_deopt_entry;
Comment(";;; -------------------- Jump table --------------------");
- Address base = deopt_jump_table_[0].address;
+ Address base = jump_table_[0].address;
Register entry_offset = scratch0();
- int length = deopt_jump_table_.length();
+ int length = jump_table_.length();
for (int i = 0; i < length; i++) {
- __ bind(&deopt_jump_table_[i].label);
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
- Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
- DCHECK(type == deopt_jump_table_[0].bailout_type);
- Address entry = deopt_jump_table_[i].address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- DCHECK(id != Deoptimizer::kNotDeoptimizationEntry);
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
// offset which will be added to the base address later.
__ mov(entry_offset, Operand(entry - base));
- if (deopt_jump_table_[i].needs_frame) {
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ b(&needs_frame);
@@ -843,9 +841,10 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
@@ -896,35 +895,35 @@ void LCodeGen::DeoptimizeIf(Condition condition,
__ stop("trap_on_deopt", condition);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ b(condition, &deopt_jump_table_.last().label);
+ __ b(condition, &jump_table_.last().label);
}
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type);
+ DeoptimizeIf(condition, instr, detail, bailout_type);
}
@@ -932,7 +931,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -1159,7 +1158,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ b(&done);
}
@@ -1177,7 +1176,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1192,7 +1191,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -1212,7 +1211,7 @@ void LCodeGen::DoModI(LModI* instr) {
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
@@ -1223,7 +1222,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
@@ -1244,7 +1243,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ bind(&done);
@@ -1269,7 +1268,7 @@ void LCodeGen::DoModI(LModI* instr) {
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "division by zero");
}
__ Move(result_reg, left_reg);
@@ -1299,7 +1298,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ bind(&done);
}
@@ -1310,26 +1309,26 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision");
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1357,7 +1356,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1365,7 +1364,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1375,7 +1374,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
@@ -1390,7 +1389,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1402,7 +1401,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&positive);
}
@@ -1414,7 +1413,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1437,7 +1436,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
@@ -1488,13 +1487,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
@@ -1517,7 +1516,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1525,7 +1524,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1566,7 +1565,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1578,7 +1577,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&positive);
}
@@ -1590,7 +1589,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1636,14 +1635,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ rsb(result, left, Operand::Zero());
}
@@ -1653,7 +1652,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ mov(result, Operand::Zero());
break;
@@ -1667,17 +1666,17 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs)) {
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ mov(result, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (IsPowerOf2(constant_abs - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ add(result, left, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (IsPowerOf2(constant_abs + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ rsb(result, left, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
@@ -1703,7 +1702,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1719,7 +1718,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&done);
}
}
@@ -1782,7 +1781,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "negative value");
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@@ -1819,7 +1818,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "negative value");
}
__ Move(result, left);
}
@@ -1834,7 +1833,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
@@ -1866,7 +1865,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -1887,7 +1886,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -1941,9 +1940,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "Smi");
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2060,7 +2059,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -2172,11 +2171,12 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2285,7 +2285,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "Smi");
}
const Register map = scratch0();
@@ -2341,7 +2341,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "unexpected object");
}
}
}
@@ -2608,7 +2608,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2689,7 +2689,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2720,7 +2720,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
@@ -2780,16 +2780,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
&load_bool_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
Label* load_bool() { return &load_bool_; }
@@ -2924,7 +2924,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2956,23 +2956,25 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (NeedsEagerFrame()) {
no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (sp_delta != 0) {
- __ add(sp, sp, Operand(sp_delta));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi
+ __ SmiUntag(reg);
+ __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
}
- } else {
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiUntag(reg);
- __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
- }
- __ Jump(lr);
+ __ Jump(lr);
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
}
@@ -2984,28 +2986,36 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Move(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(r0));
- __ mov(LoadIC::NameRegister(), Operand(instr->name()));
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(r0));
- __ mov(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3026,7 +3036,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
}
// Store the value.
@@ -3043,7 +3053,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@@ -3064,7 +3074,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
} else {
__ b(ne, &skip_assignment);
}
@@ -3119,21 +3129,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(r0));
// Name is always in r2.
- __ mov(LoadIC::NameRegister(), Operand(instr->name()));
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(r0));
- __ mov(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3150,7 +3154,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3276,7 +3280,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
@@ -3329,7 +3333,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
}
}
@@ -3363,11 +3367,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a Smi");
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "hole");
}
}
}
@@ -3417,20 +3421,14 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(r0));
- __ mov(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3515,9 +3513,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "Smi");
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr, "not a JavaScript object");
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3552,7 +3550,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3682,7 +3680,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number");
Label done;
Register exponent = scratch0();
@@ -3750,20 +3748,20 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3796,7 +3794,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3804,7 +3802,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ bind(&done);
}
@@ -3829,7 +3827,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
+ // [-0.5, -0].
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
@@ -3843,7 +3842,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&done);
}
@@ -3889,10 +3888,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d1));
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(r2));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(d0));
DCHECK(ToDoubleRegister(instr->result()).is(d2));
@@ -3901,11 +3901,12 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(r2, &no_deopt);
- __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!r6.is(tagged_exponent));
+ __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3971,6 +3972,34 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+
+ Register scratch = r3;
+ Register extra = r4;
+ Register extra2 = r5;
+ Register extra3 = r6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
@@ -3982,7 +4011,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
- instr->descriptor()->platform_specific_descriptor();
+ instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
@@ -4202,10 +4231,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ mov(StoreIC::NameRegister(), Operand(instr->name()));
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4229,7 +4258,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
@@ -4423,13 +4452,12 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4478,7 +4506,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "memento found");
__ bind(&no_memento_found);
}
@@ -4495,14 +4523,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4551,14 +4579,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4628,18 +4656,18 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4655,18 +4683,18 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4749,14 +4777,14 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4812,12 +4840,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "overflow");
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ SmiTag(output, input);
}
@@ -4831,19 +4859,20 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr, "not a Smi");
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DwVfpRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
DCHECK(!result_reg.is(double_scratch0()));
@@ -4858,7 +4887,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr, "not a heap number");
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -4868,7 +4897,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
@@ -4876,7 +4905,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
@@ -4944,24 +4973,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false");
__ mov(input_reg, Operand::Zero());
- __ b(&done);
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number");
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "minus zero");
}
}
__ bind(&done);
@@ -4969,14 +4996,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5016,11 +5043,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
@@ -5035,14 +5058,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "minus zero");
__ bind(&done);
}
}
@@ -5060,26 +5083,26 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "minus zero");
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a Smi");
}
@@ -5087,7 +5110,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "Smi");
}
}
@@ -5108,13 +5131,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else {
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
}
} else {
@@ -5122,14 +5145,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type");
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
@@ -5148,7 +5171,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "value mismatch");
}
@@ -5163,22 +5186,22 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "instance migration failed");
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5220,7 +5243,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ bind(&success);
@@ -5259,7 +5282,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
@@ -5298,14 +5321,14 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5465,9 +5488,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -5637,8 +5659,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type);
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}
@@ -5665,14 +5686,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5731,19 +5752,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "undefined");
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "null");
__ SmiTst(r0);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -5759,7 +5780,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
__ bind(&use_cache);
}
@@ -5781,7 +5802,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "no cache");
__ bind(&done);
}
@@ -5792,7 +5813,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
}
@@ -5812,7 +5833,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5825,10 +5846,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index ee5f4e9086..65cc213453 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -26,7 +26,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -169,10 +169,10 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -234,10 +234,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition condition, LEnvironment* environment);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -271,7 +271,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -281,12 +281,8 @@ class LCodeGen: public LCodeGenBase {
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition);
- void EmitNumberUntagD(Register input,
- DwVfpRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DwVfpRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -324,7 +320,7 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -332,8 +328,11 @@ class LCodeGen: public LCodeGenBase {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
@@ -351,7 +350,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
index 909ea64398..9d7d843f20 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 4b3cb4e860..0d0b0ee16e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -8,12 +8,14 @@
#if V8_TARGET_ARCH_ARM
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -270,7 +272,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
} else if (!(src2.instructions_required(this) == 1) &&
!src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
- IsPowerOf2(src2.immediate() + 1)) {
+ base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
@@ -498,8 +500,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+ mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
}
}
@@ -568,8 +570,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
- mov(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+ mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -640,8 +642,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -677,8 +679,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Ret(eq);
}
push(lr);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
@@ -1075,7 +1076,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
}
@@ -1989,12 +1990,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -2003,7 +2002,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
@@ -2012,11 +2011,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -2036,11 +2032,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -2048,11 +2043,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -2072,19 +2064,16 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3183,44 +3172,35 @@ void MacroAssembler::LookupNumberStringCache(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that neither is a smi.
and_(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -3385,34 +3365,31 @@ void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- and_(scratch1, first, Operand(kFlatAsciiStringMask));
- and_(scratch2, second, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
+ and_(scratch1, first, Operand(kFlatOneByteStringMask));
+ and_(scratch2, second, Operand(kFlatOneByteStringMask));
+ cmp(scratch1, Operand(kFlatOneByteStringTag));
// Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+ cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
b(ne, failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- and_(scratch, type, Operand(kFlatAsciiStringMask));
- cmp(scratch, Operand(kFlatAsciiStringTag));
+ and_(scratch, type, Operand(kFlatOneByteStringMask));
+ cmp(scratch, Operand(kFlatOneByteStringTag));
b(ne, failure);
}
@@ -3489,7 +3466,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp.
mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -3568,7 +3545,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
tst(sp, Operand(frame_alignment_mask));
b(eq, &alignment_as_expected);
@@ -3826,8 +3803,8 @@ void MacroAssembler::EnsureNotWhite(
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object);
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@@ -4093,16 +4070,18 @@ void MacroAssembler::TruncatingDiv(Register result,
DCHECK(!dividend.is(result));
DCHECK(!dividend.is(ip));
DCHECK(!result.is(ip));
- MultiplierAndShift ms(divisor);
- mov(ip, Operand(ms.multiplier()));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ mov(ip, Operand(mag.multiplier));
smull(ip, result, dividend, ip);
- if (divisor > 0 && ms.multiplier() < 0) {
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
add(result, result, Operand(dividend));
}
- if (divisor < 0 && ms.multiplier() > 0) {
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
sub(result, result, Operand(dividend));
}
- if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+ if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
add(result, result, Operand(dividend, LSR, 31));
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index d5ca12e4f4..d2a178664e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -6,6 +6,7 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/assembler.h"
+#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
@@ -152,8 +153,11 @@ class MacroAssembler: public Assembler {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
- void Move(Register dst, const Operand& src, Condition cond = al) {
- if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond);
+ void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
+ Condition cond = al) {
+ if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
+ mov(dst, src, sbit, cond);
+ }
}
void Move(DwVfpRegister dst, DwVfpRegister src);
@@ -750,32 +754,25 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
@@ -1318,38 +1315,33 @@ class MacroAssembler: public Assembler {
Register scratch3,
Label* not_found);
- // Checks if both objects are sequential ASCII strings and jumps to label
+ // Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
- // Checks if both objects are sequential ASCII strings and jumps to label
+ // Checks if both objects are sequential one-byte strings and jumps to label
// if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_ascii_strings);
+ void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_one_byte_strings);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Check if instance type is sequential ASCII string and jump to label if
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
+
+ // Check if instance type is sequential one-byte string and jump to label if
// it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
Register index,
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 8480f4559b..f4918febb5 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -238,7 +238,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ cmn(r1, Operand(current_input_offset()));
BranchOrBacktrack(gt, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_check;
@@ -354,7 +354,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
} else {
@@ -443,7 +443,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ mov(r0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ and_(r1, current_character(), Operand(kTableSize - 1));
__ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
} else {
@@ -464,7 +464,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), Operand(' '));
@@ -518,7 +518,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b));
__ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(hi, on_no_match);
} else {
Label done;
@@ -534,8 +534,8 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Operand('z'));
BranchOrBacktrack(hi, on_no_match);
}
@@ -548,8 +548,8 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Operand('z'));
__ b(hi, &done);
}
@@ -558,7 +558,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
__ ldrb(r0, MemOperand(r0, current_character()));
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_no_match);
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -1067,7 +1067,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1098,8 +1098,8 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1249,7 +1249,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
DCHECK(characters == 1);
}
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else if (characters == 2) {
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index fef8413411..7414e54a65 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -190,7 +190,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (Latin1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index ab26e8af13..0444025a0d 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -311,7 +311,7 @@ void ArmDebugger::Debug() {
}
for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
- uint64_t as_words = BitCast<uint64_t>(dvalue);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
VFPRegisters::Name(i, true),
dvalue,
@@ -322,10 +322,10 @@ void ArmDebugger::Debug() {
if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (GetVFPSingleValue(arg1, &svalue)) {
- uint32_t as_word = BitCast<uint32_t>(svalue);
+ uint32_t as_word = bit_cast<uint32_t>(svalue);
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
- uint64_t as_words = BitCast<uint64_t>(dvalue);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n",
arg1,
dvalue,
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 3b24197ebf..5e1bed1e8a 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -457,7 +457,7 @@ MemOperand::MemOperand()
}
-MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
: base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
DCHECK(base.Is64Bits() && !base.IsZero());
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 7f86e14a77..c1213e9693 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -33,6 +33,7 @@
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/bits.h"
#include "src/base/cpu.h"
namespace v8 {
@@ -227,7 +228,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
- for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
+ for (unsigned i = 0; i < arraysize(regs); i++) {
if (regs[i].IsRegister()) {
number_of_valid_regs++;
unique_regs |= regs[i].Bit();
@@ -601,7 +602,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -2208,6 +2209,17 @@ void Assembler::brk(int code) {
}
+void Assembler::EmitStringData(const char* string) {
+ size_t len = strlen(string) + 1;
+ DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ EmitData(string, len);
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
+}
+
+
void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
@@ -2443,7 +2455,7 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
- ptrdiff_t offset = addr.offset();
+ int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
@@ -2492,18 +2504,18 @@ void Assembler::LoadStore(const CPURegister& rt,
}
-bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+bool Assembler::IsImmLSUnscaled(int64_t offset) {
return is_int9(offset);
}
-bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
+bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}
-bool Assembler::IsImmLSPair(ptrdiff_t offset, LSDataSize size) {
+bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_int7(offset >> size);
}
@@ -2664,7 +2676,7 @@ bool Assembler::IsImmLogical(uint64_t value,
int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
// Ensure that the index to the multipliers array is within bounds.
DCHECK((multiplier_idx >= 0) &&
- (static_cast<size_t>(multiplier_idx) < ARRAY_SIZE(multipliers)));
+ (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
uint64_t multiplier = multipliers[multiplier_idx];
uint64_t candidate = (b - a) * multiplier;
@@ -3091,7 +3103,7 @@ void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
}
-void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) {
+void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
// nop (adr_far)
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 1bafce8454..9b1c5e6746 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -276,6 +276,11 @@ struct FPRegister : public CPURegister {
(kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+ // TODO(turbofan): Proper float32 support.
+ static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
// Return true if the register is one that crankshaft can allocate.
bool IsAllocatable() const {
return (Bit() & kAllocatableFPRegisters) != 0;
@@ -699,7 +704,7 @@ class MemOperand {
public:
inline MemOperand();
inline explicit MemOperand(Register base,
- ptrdiff_t offset = 0,
+ int64_t offset = 0,
AddrMode addrmode = Offset);
inline explicit MemOperand(Register base,
Register regoffset,
@@ -715,7 +720,7 @@ class MemOperand {
const Register& base() const { return base_; }
const Register& regoffset() const { return regoffset_; }
- ptrdiff_t offset() const { return offset_; }
+ int64_t offset() const { return offset_; }
AddrMode addrmode() const { return addrmode_; }
Shift shift() const { return shift_; }
Extend extend() const { return extend_; }
@@ -742,7 +747,7 @@ class MemOperand {
private:
Register base_;
Register regoffset_;
- ptrdiff_t offset_;
+ int64_t offset_;
AddrMode addrmode_;
Shift shift_;
Extend extend_;
@@ -1733,16 +1738,7 @@ class Assembler : public AssemblerBase {
// Copy a string into the instruction stream, including the terminating NULL
// character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions.
- void EmitStringData(const char * string) {
- size_t len = strlen(string) + 1;
- DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
- EmitData(string, len);
- // Pad with NULL characters until pc_ is aligned.
- const char pad[] = {'\0', '\0', '\0', '\0'};
- STATIC_ASSERT(sizeof(pad) == kInstructionSize);
- byte* next_pc = AlignUp(pc_, kInstructionSize);
- EmitData(&pad, next_pc - pc_);
- }
+ void EmitStringData(const char* string);
// Pseudo-instructions ------------------------------------------------------
@@ -1859,6 +1855,9 @@ class Assembler : public AssemblerBase {
inline static Instr ImmBarrierType(int imm2);
inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+ static bool IsImmLSUnscaled(int64_t offset);
+ static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+
// Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm);
inline static Instr ShiftMoveWide(int64_t shift);
@@ -1942,12 +1941,10 @@ class Assembler : public AssemblerBase {
void LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op);
- static bool IsImmLSUnscaled(ptrdiff_t offset);
- static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
- static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size);
+ static bool IsImmLSPair(int64_t offset, LSDataSize size);
void Logical(const Register& rd,
const Register& rn,
@@ -2292,7 +2289,7 @@ class PatchingAssembler : public Assembler {
// See definition of PatchAdrFar() for details.
static const int kAdrFarPatchableNNops = 2;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
- void PatchAdrFar(ptrdiff_t target_offset);
+ void PatchAdrFar(int64_t target_offset);
};
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 2e0aed77a5..e9ad8f165d 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -10,8 +10,7 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -781,8 +780,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 3ef118aae9..35b60f7a6f 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -8,159 +8,20 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: function info
- Register registers[] = { cp, x2 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x1: function
- Register registers[] = { cp, x1 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value
- Register registers[] = { cp, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value
- Register registers[] = { cp, x0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x3: array literals array
- // x2: array literal index
- // x1: constant elements
- Register registers[] = { cp, x3, x2, x1 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x3: object literals array
- // x2: object literal index
- // x1: constant properties
- // x0: object literal flags
- Register registers[] = { cp, x3, x2, x1, x0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: feedback vector
- // x3: call feedback slot
- Register registers[] = { cp, x2, x3 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x1 function the function to call
- Register registers[] = {cp, x1};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x0 : number of arguments
- // x1 : the function to call
- // x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, x0, x1, x2};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: length
- // x1: index (of last match)
- // x0: string
- Register registers[] = { cp, x2, x1, x0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value (js_array)
- // x1: to_map
- Register registers[] = { cp, x0, x1 };
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(entry));
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value to compare
- Register registers[] = { cp, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return cp; }
-
static void InitializeArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
// cp: context
// x1: function
@@ -170,247 +31,90 @@ static void InitializeArrayConstructorDescriptor(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, x1, x2 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, x1, x2, x0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // cp: context
- // x1: constructor function
- // x0: number of arguments to the constructor function
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, x1 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, x1, x0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x0: value
- Register registers[] = { cp, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x1: left operand
- // x0: right operand
- Register registers[] = { cp, x1, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x2: allocation site
- // x1: left operand
- // x0: right operand
- Register registers[] = { cp, x2, x1, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // cp: context
- // x1: left operand
- // x0: right operand
- Register registers[] = { cp, x1, x0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- static PlatformInterfaceDescriptor noInlineDescriptor =
- PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { cp, // context
- x1, // JSFunction
- x0, // actual number of arguments
- x2, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { cp, // context
- x2, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &noInlineDescriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { cp, // context
- x2, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &noInlineDescriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { cp, // context
- x0, // receiver
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { cp, // context
- x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers,
- representations, &default_descriptor);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
- x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1)));
+ x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor->GetEnvironmentParameterRegister(i));
+ queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
}
queue.PushQueued();
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -519,30 +223,30 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if ((cond == lt) || (cond == gt)) {
__ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
ge);
+ } else if (cond == eq) {
+ __ JumpIfHeapNumber(right, &heap_number);
} else {
Register right_type = scratch;
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if ((cond == le) || (cond == ge)) {
- __ Cmp(right_type, ODDBALL_TYPE);
- __ B(ne, &return_equal);
- __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ Mov(result, GREATER);
- } else {
- // undefined >= undefined should fail.
- __ Mov(result, LESS);
- }
- __ Ret();
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
}
+ __ Ret();
}
}
@@ -646,10 +350,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register right,
FPRegister left_d,
FPRegister right_d,
- Register scratch,
Label* slow,
bool strict) {
- DCHECK(!AreAliased(left, right, scratch));
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
(right.is(x0) && left.is(x1)));
@@ -663,8 +365,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// If right is not a number and left is a smi, then strict equality cannot
// succeed. Return non-equal.
Label is_heap_number;
- __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
+ __ JumpIfHeapNumber(right, &is_heap_number);
// Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
if (!right.is(result)) {
__ Mov(result, NOT_EQUAL);
@@ -674,7 +375,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
} else {
// Smi compared non-strictly with a non-smi, non-heap-number. Call the
// runtime.
- __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ __ JumpIfNotHeapNumber(right, slow);
}
// Left is the smi. Right is a heap number. Load right value into right_d, and
@@ -689,8 +390,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// If left is not a number and right is a smi then strict equality cannot
// succeed. Return non-equal.
Label is_heap_number;
- __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
+ __ JumpIfHeapNumber(left, &is_heap_number);
// Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
if (!left.is(result)) {
__ Mov(result, NOT_EQUAL);
@@ -700,7 +400,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
} else {
// Smi compared non-strictly with a non-smi, non-heap-number. Call the
// runtime.
- __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ __ JumpIfNotHeapNumber(left, slow);
}
// Right is the smi. Left is a heap number. Load left value into left_d, and
@@ -767,18 +467,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(input, fail);
}
// We could be strict about internalized/non-internalized here, but as long as
// hydrogen doesn't care, the stub doesn't have to care either.
@@ -786,15 +483,15 @@ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
}
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = x1;
Register rhs = x0;
Register result = x0;
Condition cond = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
@@ -827,7 +524,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// rhs_d, left into lhs_d.
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
- EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in rhs_d and
@@ -901,20 +598,20 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
&flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ Bind(&flat_string_check);
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
- x15, &slow);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
+ x15, &slow);
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
x11);
if (cond == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
- x10, x11, x12);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+ x12);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
- x10, x11, x12, x13);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+ x12, x13);
}
// Never fall through to here.
@@ -964,7 +661,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
saved_fp_regs.Remove(*(masm->FPTmpList()));
__ PushCPURegList(saved_regs);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ PushCPURegList(saved_fp_regs);
}
@@ -973,7 +670,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ PopCPURegList(saved_fp_regs);
}
__ PopCPURegList(saved_regs);
@@ -1024,8 +721,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Register result_tagged = x0;
Register base_tagged = x10;
- Register exponent_tagged = x11;
- Register exponent_integer = x12;
+ Register exponent_tagged = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent_tagged.is(x11));
+ Register exponent_integer = MathPowIntegerDescriptor::exponent();
+ DCHECK(exponent_integer.is(x12));
Register scratch1 = x14;
Register scratch0 = x15;
Register saved_lr = x19;
@@ -1044,7 +743,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label done;
// Unpack the inputs.
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi;
Label unpack_exponent;
@@ -1068,20 +767,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// exponent_tagged is a heap number, so load its double value.
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
__ JumpIfSmi(exponent_tagged, &exponent_is_smi);
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
}
// Handle double (heap number) exponents.
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
// Detect integer exponents stored as doubles and handle those in the
// integer fast-path.
__ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
scratch0_double, &exponent_is_integer);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
FPRegister half_double = d3;
FPRegister minus_half_double = d4;
// Detect square root case. Crankshaft detects constant +/-0.5 at compile
@@ -1232,7 +931,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Fcmp(result_double, 0.0);
__ B(&done, ne);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Bail out to runtime code.
__ Bind(&call_runtime);
// Put the arguments back on the stack.
@@ -1373,7 +1072,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
// registers.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_, x10, 3);
+ __ EnterExitFrame(save_doubles(), x10, 3);
DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
@@ -1470,7 +1169,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles_, x10, true);
+ __ LeaveExitFrame(save_doubles(), x10, true);
DCHECK(jssp.Is(__ StackPointer()));
// Pop or drop the remaining stack slots and return from the stub.
// jssp[24]: Arguments array (of size argc), including receiver.
@@ -1542,7 +1241,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// x4: argv.
// Output:
// x0: result.
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
@@ -1573,7 +1272,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Fmov(fp_zero, 0.0);
// Build an entry frame (see layout below).
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
__ Mov(x12, Smi::FromInt(marker));
@@ -1658,8 +1357,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// x2: receiver.
// x3: argc.
// x4: argv.
- ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
- : Builtins::kJSEntryTrampoline,
+ ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
+ ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
isolate());
__ Mov(x10, entry);
@@ -1711,7 +1411,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@@ -1902,21 +1602,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() {
- // Object to check (instanceof lhs).
- return x11;
-}
-
-
-Register InstanceofStub::right() {
- // Constructor function (instanceof rhs).
- return x10;
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- Register arg_count = x0;
- Register key = x1;
+ Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
+ Register key = ArgumentsAccessReadDescriptor::index();
+ DCHECK(arg_count.is(x0));
+ DCHECK(key.is(x1));
// The displacement is the offset of the last parameter (if any) relative
// to the frame pointer.
@@ -2265,6 +1955,29 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ Bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Stack layout on entry.
// jssp[0]: number of parameters (tagged)
@@ -2416,7 +2129,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// w0 string_type type of subject string
// x2 jsstring_length subject string length
// x3 jsregexp_object JSRegExp object
- // w4 string_encoding ASCII or UC16
+ // w4 string_encoding Latin1 or UC16
// w5 sliced_string_offset if the string is a SlicedString
// offset to the underlying string
// w6 string_representation groups attributes of the string:
@@ -2614,17 +2327,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kStringEncodingMask == 0x04);
// Find the code object based on the assumptions above.
- // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
+ // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
// of kPointerSize to reach the latter.
- DCHECK_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
- // We will need the encoding later: ASCII = 0x04
- // UC16 = 0x00
+ // We will need the encoding later: Latin1 = 0x04
+ // UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
__ Add(x10, regexp_data, x10);
- __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
// (E) Carry on. String handling is done.
@@ -2667,13 +2380,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding, two bytes make one character.
- // string_encoding: if ASCII: 0x04
- // if UC16: 0x00
+ // string_encoding: if Latin1: 0x04
+ // if UC16: 0x00
STATIC_ASSERT(kStringEncodingMask == 0x04);
__ Ubfx(string_encoding, string_encoding, 2, 1);
__ Eor(string_encoding, string_encoding, 1);
- // string_encoding: if ASCII: 0
- // if UC16: 1
+ // string_encoding: if Latin1: 0
+ // if UC16: 1
// Convert string positions from characters to bytes.
// Previous index is in x1.
@@ -2936,9 +2649,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
- DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
@@ -3143,7 +2856,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallFunctionStub::Generate");
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -3258,7 +2971,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -3278,7 +2991,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
Register function = x1;
@@ -3297,7 +3010,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ B(ne, &extra_checks_or_miss);
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Compute the receiver in sloppy mode.
@@ -3317,7 +3030,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(masm, argc, function, type, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -3342,7 +3055,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -3356,11 +3069,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("CallICStub[Miss]");
// Get the receiver of the function from the stack; 1 ~ return address.
- __ Peek(x4, (state_.arg_count() + 1) * kPointerSize);
+ __ Peek(x4, (arg_count() + 1) * kPointerSize);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -3369,6 +3082,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ Push(x4, x1, x2, x3);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -3418,11 +3134,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
@@ -3469,7 +3181,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ B(hi, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one-byte char code.
__ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
__ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
@@ -3494,10 +3206,10 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
- DCHECK(state_ == CompareIC::SMI);
- ASM_LOCATION("ICCompareStub[Smis]");
+ DCHECK(state() == CompareICState::SMI);
+ ASM_LOCATION("CompareICStub[Smis]");
Label miss;
// Bail out (to 'miss') unless both x0 and x1 are smis.
__ JumpIfEitherNotSmi(x0, x1, &miss);
@@ -3517,9 +3229,9 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
- ASM_LOCATION("ICCompareStub[HeapNumbers]");
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
+ ASM_LOCATION("CompareICStub[HeapNumbers]");
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss, handle_lhs, values_in_d_regs;
@@ -3531,10 +3243,10 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(rhs, &miss);
}
@@ -3543,15 +3255,13 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load rhs if it's a heap number.
__ JumpIfSmi(rhs, &handle_lhs);
- __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
__ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Load lhs if it's a heap number.
__ Bind(&handle_lhs);
__ JumpIfSmi(lhs, &values_in_d_regs);
- __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
__ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
__ Bind(&values_in_d_regs);
@@ -3563,20 +3273,20 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ Ret();
__ Bind(&unordered);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ Bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
__ JumpIfSmi(lhs, &unordered);
- __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
__ B(&unordered);
}
__ Bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
}
@@ -3585,9 +3295,9 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
- ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+ ASM_LOCATION("CompareICStub[InternalizedStrings]");
Label miss;
Register result = x0;
@@ -3623,9 +3333,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
- ASM_LOCATION("ICCompareStub[UniqueNames]");
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ ASM_LOCATION("CompareICStub[UniqueNames]");
DCHECK(GetCondition() == eq);
Label miss;
@@ -3648,8 +3358,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// To avoid a miss, each instance type should be either SYMBOL_TYPE or it
// should have kInternalizedTag set.
- __ JumpIfNotUniqueName(lhs_instance_type, &miss);
- __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
// Unique names are compared by identity.
STATIC_ASSERT(EQUAL == 0);
@@ -3662,13 +3372,13 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
- ASM_LOCATION("ICCompareStub[Strings]");
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
+ ASM_LOCATION("CompareICStub[Strings]");
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
Register result = x0;
Register rhs = x0;
@@ -3715,18 +3425,18 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ Bind(&not_internalized_strings);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- lhs_type, rhs_type, x12, x13, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
+ x13, &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, lhs, rhs, x10, x11, x12);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+ x12);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, lhs, rhs, x10, x11, x12, x13);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+ x12, x13);
}
// Handle more complex cases in runtime.
@@ -3743,9 +3453,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
- ASM_LOCATION("ICCompareStub[Objects]");
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
+ ASM_LOCATION("CompareICStub[Objects]");
Label miss;
@@ -3767,8 +3477,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[KnownObjects]");
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[KnownObjects]");
Label miss;
@@ -3797,10 +3507,10 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
// This method handles the case where a compare stub had the wrong
// implementation. It calls a miss handler, which re-writes the stub. All other
-// ICCompareStub::Generate* methods should fall back into this one if their
+// CompareICStub::Generate* methods should fall back into this one if their
// operands were not the expected types.
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[Miss]");
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[Miss]");
Register stub_entry = x11;
{
@@ -3814,7 +3524,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Preserve some caller-saved registers.
__ Push(x1, x0, lr);
// Push the arguments.
- __ Mov(op, Smi::FromInt(op_));
+ __ Mov(op, Smi::FromInt(this->op()));
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
@@ -3831,67 +3541,6 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- DCHECK(!AreAliased(hash, character));
-
- // hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ Add(hash, character, Operand::UntagSmi(hash));
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- DCHECK(!AreAliased(hash, character));
-
- // hash += character;
- __ Add(hash, hash, character);
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
- Register scratch_w = scratch.W();
- DCHECK(!AreAliased(hash_w, scratch_w));
-
- // hash += hash << 3;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
- // hash ^= hash >> 11;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
- // hash += hash << 15;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
-
- __ Ands(hash_w, hash_w, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- __ Mov(scratch_w, StringHasher::kZeroHash);
- __ Csel(hash_w, scratch_w, hash_w, eq);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("SubStringStub::Generate");
Label runtime;
@@ -4034,8 +3683,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
- __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
- &runtime);
+ __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
__ B(&set_slice_header);
__ Bind(&two_byte_slice);
@@ -4085,12 +3734,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ Bind(&allocate_result);
- // Sequential ASCII string. Allocate the result.
+ // Sequential one-byte string. Allocate the result.
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
// Locate first character of substring to copy.
__ Add(substring_char0, unpacked_char0, from);
@@ -4143,12 +3792,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
Register result = x0;
Register left_length = scratch1;
@@ -4176,8 +3822,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters. Falls through if all characters are equal.
__ Bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
- scratch3, &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
// Characters in strings are equal.
__ Mov(result, Smi::FromInt(EQUAL));
@@ -4185,13 +3831,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
Label result_not_equal, compare_lengths;
@@ -4206,9 +3848,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Cbz(min_length, &compare_lengths);
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ Bind(&compare_lengths);
@@ -4230,14 +3871,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal) {
DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
// Change index to run from -length to -1 by adding length to string
@@ -4285,13 +3921,14 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // Compare flat one-byte strings natively. Remove arguments from stack first,
// as this function will generate a return.
__ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
+ x14, x15);
__ Bind(&runtime);
@@ -4328,7 +3965,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
@@ -4338,16 +3975,14 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// but we need to save them before using them.
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
- __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+ Register val = regs_.scratch0();
+ __ Ldr(val, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
- __ CheckPageFlagSet(regs_.object(),
- value,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
@@ -4357,11 +3992,9 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
InformIncrementalMarker(masm);
regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
__ Bind(&dont_need_remembered_set);
}
@@ -4375,7 +4008,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
Register address =
x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
DCHECK(!address.Is(regs_.object()));
@@ -4391,7 +4024,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
isolate());
__ CallCFunction(function, 3, 0);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4418,25 +4051,22 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ Bind(&on_black);
// Get the value from the slot.
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
+ Register val = regs_.scratch0();
+ __ Ldr(val, MemOperand(regs_.address()));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
- __ CheckPageFlagClear(value,
- regs_.scratch1(),
+ __ CheckPageFlagClear(val, regs_.scratch1(),
MemoryChunk::kEvacuationCandidateMask,
&ensure_not_white);
@@ -4451,7 +4081,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(value,
+ __ EnsureNotWhite(val,
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
@@ -4461,11 +4091,9 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
@@ -4493,12 +4121,10 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ adr(xzr, &skip_to_incremental_compacting);
}
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
}
__ Ret();
@@ -4586,7 +4212,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4596,6 +4222,20 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
// The entry hook is a "BumpSystemStackPointer" instruction (sub),
// followed by a "Push lr" instruction, followed by a call.
@@ -4836,7 +4476,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ Ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ Bind(&good);
}
@@ -4919,11 +4559,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ Cmp(entry_key, key);
__ B(eq, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -4931,7 +4571,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup, probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ Mov(result, 0);
__ Ret();
}
@@ -5111,7 +4751,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Register argc = x0;
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label zero_case, n_case;
__ Cbz(argc, &zero_case);
__ Cmp(argc, 1);
@@ -5128,11 +4768,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
// N arguments.
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -5143,7 +4783,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("ArrayConstructorStub::Generate");
// ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x0 : argc (only if argument_count() == ANY)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
// -- sp[0] : return address
@@ -5295,9 +4935,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = x1;
Register context = cp;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -5387,7 +5027,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- x2 : api_function_address
// -----------------------------------
- Register api_function_address = x2;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(x2));
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 75a945299f..03dab5bac2 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
-#include "src/ic-inl.h"
-
namespace v8 {
namespace internal {
@@ -14,42 +12,25 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
- // TODO(all): These don't seem to be used any more. Delete them.
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
+ // Compares two flat one-byte strings and returns result in x0.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
+
+ // Compare two flat one-byte strings for equality and returns result in x0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
private:
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -60,12 +41,12 @@ class StoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
static Register to_be_pushed_lr() { return ip0; }
+
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return StoreRegistersState; }
- int MinorKey() const { return 0; }
- void Generate(MacroAssembler* masm);
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
@@ -75,11 +56,10 @@ class RestoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return RestoreRegistersState; }
- int MinorKey() const { return 0; }
- void Generate(MacroAssembler* masm);
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
@@ -95,16 +75,22 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ DCHECK(object.Is64Bits());
+ DCHECK(value.Is64Bits());
+ DCHECK(address.Is64Bits());
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -176,6 +162,8 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(GetMode(stub) == mode);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved.
@@ -282,49 +270,43 @@ class RecordWriteStub: public PlatformCodeStub {
friend class RecordWriteStub;
};
- // A list of stub variants which are pregenerated.
- // The variants are stored in the same format as the minor key, so
- // MinorKeyFor() can be used to populate and check this list.
- static const int kAheadOfTime[];
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
-
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return MinorKeyFor(object_, value_, address_, remembered_set_action_,
- save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- static int MinorKeyFor(Register object,
- Register value,
- Register address,
- RememberedSetAction action,
- SaveFPRegsMode fp_mode) {
- DCHECK(object.Is64Bits());
- DCHECK(value.Is64Bits());
- DCHECK(address.Is64Bits());
- return ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(action) |
- SaveFPRegsModeBits::encode(fp_mode);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 5> {};
@@ -333,11 +315,6 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
@@ -348,14 +325,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() const { return DirectCEntry; }
- int MinorKey() const { return 0; }
-
bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
@@ -364,9 +340,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -398,78 +374,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
-
- int MinorKey() const { return LookupModeBits::encode(mode_); }
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
- LookupMode mode_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- // Compares two flat ASCII strings and returns result in x0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compare two flat ASCII strings for equality and returns result
- // in x0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-
-class PlatformInterfaceDescriptor {
- public:
- explicit PlatformInterfaceDescriptor(
- TargetAddressStorageMode storage_mode)
- : storage_mode_(storage_mode) { }
-
- TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
- TargetAddressStorageMode storage_mode_;
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 16b6d3b188..91eaba7957 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -485,15 +485,15 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
- Label ascii, done;
+ Label one_byte, done;
__ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+ __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
// Two-byte string.
__ Ldrh(result, MemOperand(string, index, SXTW, 1));
__ B(&done);
- __ Bind(&ascii);
- // Ascii string.
+ __ Bind(&one_byte);
+ // One-byte string.
__ Ldrb(result, MemOperand(string, index, SXTW));
__ Bind(&done);
}
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
index 9ef148cc40..2f01c510de 100644
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -6,7 +6,7 @@
#define V8_ARM64_CODEGEN_ARM64_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
index 746d9a8b47..f57d5b5ab2 100644
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -236,17 +236,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm64.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
@@ -260,9 +260,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm64.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
index cf7dc34c58..5cca85ea28 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -17,13 +17,13 @@ namespace internal {
void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
- visitors_.push_front(new_visitor);
+ visitors_.push_back(new_visitor);
}
void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
- visitors_.push_back(new_visitor);
+ visitors_.push_front(new_visitor);
}
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index d40468029f..d67dc8fcd9 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -89,7 +89,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index b8b1d5d256..ac7cb37322 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -1517,7 +1517,9 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
case LDR_w_lit:
case LDR_x_lit:
case LDR_s_lit:
- case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ case LDR_d_lit:
+ AppendToOutput("(addr 0x%016" PRIxPTR ")", instr->LiteralAddress());
+ break;
default: UNREACHABLE();
}
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
index 2e63814bd3..9d544825e4 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
@@ -1051,7 +1052,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1178,7 +1180,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&fixed_array);
__ LoadObject(x1, FeedbackVector());
- __ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
@@ -1319,9 +1321,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ Mov(x2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1341,6 +1341,26 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ ldr(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ Mov(x10, Operand(isolate()->factory()->undefined_value()));
+ __ cmp(x0, x10);
+ Label done;
+ __ b(&done, ne);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1384,10 +1404,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ Bind(&fast);
}
- __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadIC::NameRegister(), Operand(proxy->var()->name()));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
@@ -1469,10 +1489,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
- __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadIC::NameRegister(), Operand(var->name()));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
@@ -1682,9 +1702,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreIC::ValueRegister().is(x0));
- __ Mov(StoreIC::NameRegister(), Operand(key->value()));
- __ Peek(StoreIC::ReceiverRegister(), 0);
+ DCHECK(StoreDescriptor::ValueRegister().is(x0));
+ __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1844,13 +1864,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1862,17 +1888,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ Peek(LoadIC::ReceiverRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
} else {
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = x10;
+ __ Peek(scratch, kPointerSize);
+ __ Push(scratch, result_register());
+ }
+ break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadIC::NameRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadDescriptor::NameRegister(), 0);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1893,6 +1929,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1939,6 +1979,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1949,9 +1992,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ Mov(LoadIC::NameRegister(), Operand(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -1960,12 +2005,24 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
@@ -1993,10 +2050,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(x10, &both_smis);
__ Bind(&stub_call);
- BinaryOpICStub stub(isolate(), op, mode);
+
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
{
Assembler::BlockPoolsScope scope(masm_);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
__ B(&done);
@@ -2019,16 +2077,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Ubfx(right, right, kSmiShift, 5);
__ Lsl(result, left, right);
break;
- case Token::SHR: {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- __ Tbnz(left, kXSignBit, &stub_call);
- __ Bind(&right_not_zero);
+ case Token::SHR:
+ // If `left >>> right` >= 0x80000000, the result is not representable in a
+ // signed 32-bit smi.
__ Ubfx(right, right, kSmiShift, 5);
- __ Lsr(result, left, right);
- __ Bic(result, result, kSmiShiftMask);
+ __ Lsr(x10, left, right);
+ __ Tbnz(x10, kXSignBit, &stub_call);
+ __ Bic(result, x10, kSmiShiftMask);
break;
- }
case Token::ADD:
__ Adds(x10, left, right);
__ B(vs, &stub_call);
@@ -2079,11 +2135,11 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ Pop(x1);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
context()->Plug(x0);
@@ -2116,9 +2172,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
// TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
// this copy.
- __ Mov(StoreIC::ReceiverRegister(), x0);
- __ Pop(StoreIC::ValueRegister()); // Restore value.
- __ Mov(StoreIC::NameRegister(),
+ __ Mov(StoreDescriptor::ReceiverRegister(), x0);
+ __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
@@ -2127,11 +2183,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Mov(KeyedStoreIC::NameRegister(), x0);
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::ValueRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Mov(StoreDescriptor::NameRegister(), x0);
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::ValueRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2158,8 +2214,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ Mov(StoreIC::NameRegister(), Operand(var->name()));
- __ Ldr(StoreIC::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2231,8 +2287,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreIC::ReceiverRegister());
+ __ Mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2240,6 +2297,24 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // x0 : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(x0);
+ __ Push(key->value());
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC.
@@ -2247,12 +2322,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack?
- __ Pop(KeyedStoreIC::NameRegister(), KeyedStoreIC::ReceiverRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(x0));
+ __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(x0));
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2265,16 +2338,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), x0);
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), x0);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadIC::NameRegister(), x0);
- __ Pop(LoadIC::ReceiverRegister());
+ __ Move(LoadDescriptor::NameRegister(), x0);
+ __ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(x0);
}
@@ -2294,12 +2374,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2310,7 +2389,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ Peek(LoadIC::ReceiverRegister(), 0);
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2322,6 +2402,45 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+
+ // Load the function from the receiver.
+ const Register scratch = x10;
+ SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(x0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(x0);
+ __ Peek(scratch, kPointerSize);
+ __ Push(x0, scratch);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ Poke(x0, kPointerSize);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2332,8 +2451,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ Peek(LoadIC::ReceiverRegister(), 0);
- __ Move(LoadIC::NameRegister(), x0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
+ __ Move(LoadDescriptor::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2341,11 +2460,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Pop(x10);
__ Push(x0, x10);
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2494,15 +2613,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+ // super.x() is handled in EmitCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
-
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -2822,7 +2947,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
// Only a HeapNumber can be -0.0, so return false if we have something else.
- __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
// Test the bit pattern.
__ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
@@ -3015,7 +3140,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ Bind(&function);
- __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+ __ LoadRoot(x0, Heap::kFunction_stringRootIndex);
__ B(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3134,9 +3259,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register value = x2;
Register scratch = x10;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value, index);
if (FLAG_debug_code) {
@@ -3164,9 +3289,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register value = x2;
Register scratch = x10;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value, index);
if (FLAG_debug_code) {
@@ -3507,8 +3632,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3560,7 +3685,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Get the FixedArray containing array's elements.
__ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths.
__ Mov(string_length, 0);
__ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
@@ -3575,14 +3700,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements_end: Array end.
if (FLAG_debug_code) {
__ Cmp(array_length, 0);
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ Bind(&loop);
__ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
__ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ Ldrsw(scratch1,
UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ Adds(string_length, string_length, scratch1);
@@ -3604,11 +3729,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (not smi).
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
__ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string.
@@ -3628,13 +3753,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array (not smi).
- __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
+ &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
- // TODO(all): useless unless AllocateAsciiString trashes the register.
+ // TODO(all): useless unless AllocateOneByteString trashes the register.
__ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
__ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
@@ -3662,7 +3787,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ Bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3673,7 +3798,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
+ // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
__ Strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3749,15 +3874,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(LoadIC::ReceiverRegister(),
+ __ Ldr(LoadDescriptor::ReceiverRegister(),
FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
- __ Push(LoadIC::ReceiverRegister());
+ __ Push(LoadDescriptor::ReceiverRegister());
// Load the function from the receiver.
Handle<String> name = expr->name();
- __ Mov(LoadIC::NameRegister(), Operand(name));
+ __ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -3922,6 +4047,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -3937,14 +4067,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ Peek(LoadIC::ReceiverRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(prop);
} else {
// KEYED_PROPERTY
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadIC::NameRegister(), 0);
+ __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadDescriptor::NameRegister(), 0);
EmitKeyedPropertyLoad(prop);
}
}
@@ -4025,8 +4155,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
Assembler::BlockPoolsScope scope(masm_);
- BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}
__ Bind(&done);
@@ -4054,9 +4185,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Mov(StoreIC::NameRegister(),
+ __ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreIC::ReceiverRegister());
+ __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4069,11 +4200,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(KeyedStoreIC::NameRegister());
- __ Pop(KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Pop(StoreDescriptor::NameRegister());
+ __ Pop(StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4095,10 +4225,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadIC::NameRegister(), Operand(proxy->name()));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4271,7 +4401,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4332,12 +4462,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// don't want to spend too much time on it now.
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ Push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ B(&suspend);
@@ -4372,7 +4502,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
__ Str(x1, FieldMemOperand(result_register(),
@@ -4384,7 +4514,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -4393,8 +4523,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
@@ -4454,10 +4584,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Peek(load_receiver, 1 * kPointerSize);
__ Peek(load_name, 2 * kPointerSize);
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
@@ -4474,7 +4604,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->DoneFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
@@ -4487,7 +4617,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
- __ Mov(LoadIC::SlotRegister(),
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->ValueFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index a6ca6affae..71094baa87 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -182,8 +182,8 @@ LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
}
-ptrdiff_t Instruction::ImmPCOffset() {
- ptrdiff_t offset;
+int64_t Instruction::ImmPCOffset() {
+ int64_t offset;
if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported.
offset = ImmPCRel();
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index bd4e753779..374e2464c3 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -338,7 +338,7 @@ class Instruction {
// Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction.
// The offset returned is unscaled.
- ptrdiff_t ImmPCOffset();
+ int64_t ImmPCOffset();
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
@@ -352,9 +352,9 @@ class Instruction {
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
- uint8_t* LiteralAddress() {
+ uintptr_t LiteralAddress() {
int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
- return reinterpret_cast<uint8_t*>(this) + offset;
+ return reinterpret_cast<uintptr_t>(this) + offset;
}
enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 59982d975b..da505ff294 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -107,7 +107,7 @@ Instrument::Instrument(const char* datafile, uint64_t sample_period)
}
}
- static const int num_counters = ARRAY_SIZE(kCounterList);
+ static const int num_counters = arraysize(kCounterList);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
new file mode 100644
index 0000000000..690c8c28ee
--- /dev/null
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -0,0 +1,368 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return x1; }
+const Register LoadDescriptor::NameRegister() { return x2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return x1; }
+const Register StoreDescriptor::NameRegister() { return x2; }
+const Register StoreDescriptor::ValueRegister() { return x0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
+
+
+const Register InstanceofDescriptor::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+const Register InstanceofDescriptor::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return x1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return x2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return x11; }
+
+
+const Register MathPowIntegerDescriptor::exponent() { return x12; }
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: function info
+ Register registers[] = {cp, x2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: function
+ Register registers[] = {cp, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ Register registers[] = {cp, x3, x2, x1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ Register registers[] = {cp, x3, x2, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: feedback vector
+ // x3: call feedback slot
+ Register registers[] = {cp, x2, x3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, x3, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // x1 function the function to call
+ Register registers[] = {cp, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, x1, x3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, x0, x1, x2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ Register registers[] = {cp, x2, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value (js_array)
+ // x1: to_map
+ Register registers[] = {cp, x0, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ Register registers[] = {cp, x1, x2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, x1, x2, x0};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ Register registers[] = {cp, x1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, x1, x0};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value to compare
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x0: value
+ Register registers[] = {cp, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x2, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &noInlineDescriptor);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x1, // JSFunction
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.h b/deps/v8/src/arm64/interface-descriptors-arm64.h
new file mode 100644
index 0000000000..76def88326
--- /dev/null
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.h
@@ -0,0 +1,26 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
+#define V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor {
+ public:
+ explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) {}
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index 7bb66dbd70..502b046927 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -354,12 +354,6 @@ const char* LArithmeticT::Mnemonic() const {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -1036,14 +1030,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1252,7 +1246,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
DCHECK(input_rep.IsSmiOrTagged());
return AssignEnvironment(
DefineAsRegister(new(zone()) LClampTToUint8(reg,
- TempRegister(),
TempDoubleRegister())));
}
}
@@ -1475,6 +1468,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
@@ -1561,6 +1555,19 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@@ -1663,11 +1670,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -1725,11 +1732,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -1747,10 +1755,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -1934,12 +1943,12 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
int32_t constant_abs = Abs(constant);
if (!end_range_constant &&
- (small_constant ||
- (IsPowerOf2(constant_abs)) ||
- (!can_overflow && (IsPowerOf2(constant_abs + 1) ||
- IsPowerOf2(constant_abs - 1))))) {
+ (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) ||
+ (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) ||
+ base::bits::IsPowerOfTwo32(constant_abs - 1))))) {
LConstantOperand* right = UseConstant(most_const);
- bool need_register = IsPowerOf2(constant_abs) && !small_constant;
+ bool need_register =
+ base::bits::IsPowerOfTwo32(constant_abs) && !small_constant;
LOperand* left = need_register ? UseRegister(least_const)
: UseRegisterAtStart(least_const);
LInstruction* result =
@@ -1985,10 +1994,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2001,11 +2010,14 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = exponent_type.IsInteger32()
- ? UseFixed(instr->right(), x12)
- : exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), d1)
- : UseFixed(instr->right(), x11);
+ LOperand* right;
+ if (exponent_type.IsInteger32()) {
+ right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent());
+ } else if (exponent_type.IsDouble()) {
+ right = UseFixedDouble(instr->right(), d1);
+ } else {
+ right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+ }
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d0),
instr,
@@ -2203,8 +2215,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
return DoArithmeticT(op, instr);
}
- DCHECK(instr->representation().IsInteger32() ||
- instr->representation().IsSmi());
+ DCHECK(instr->representation().IsSmiOrInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
@@ -2215,42 +2226,30 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
LOperand* left = instr->representation().IsSmi()
? UseRegister(instr->left())
: UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- LOperand* temp = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- right = UseConstant(right_value);
- constant_value = JSShiftAmountFromHConstant(right_value);
- } else {
- right = UseRegisterAtStart(right_value);
- if (op == Token::ROR) {
- temp = TempRegister();
- }
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and the
- // result cannot be truncated to int32.
- bool does_deopt = false;
- if ((op == Token::SHR) && (constant_value == 0)) {
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+
+ // The only shift that can deoptimize is `left >>> 0`, where left is negative.
+ // In these cases, the result is a uint32 that is too large for an int32.
+ bool right_can_be_zero = !instr->right()->IsConstant() ||
+ (JSShiftAmountFromHConstant(instr->right()) == 0);
+ bool can_deopt = false;
+ if ((op == Token::SHR) && right_can_be_zero) {
if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ can_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ can_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
LInstruction* result;
if (instr->representation().IsInteger32()) {
- result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt));
} else {
DCHECK(instr->representation().IsSmi());
- result = DefineAsRegister(
- new(zone()) LShiftS(op, left, right, temp, does_deopt));
+ result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt));
}
- return does_deopt ? AssignEnvironment(result) : result;
+ return can_deopt ? AssignEnvironment(result) : result;
}
@@ -2379,10 +2378,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(),
- KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2424,8 +2423,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
@@ -2682,7 +2682,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 21a5f74141..6ead3fe8ca 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -165,6 +165,7 @@ class LCodeGen;
V(SubI) \
V(SubS) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -178,11 +179,11 @@ class LCodeGen;
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -294,7 +295,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return (R != 0) && (result() != NULL);
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -316,17 +317,38 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -338,7 +360,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -388,8 +410,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -425,11 +447,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -437,7 +459,7 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -450,14 +472,14 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -466,14 +488,14 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -482,7 +504,7 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -498,17 +520,17 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -524,18 +546,18 @@ class LLabel V8_FINAL : public LGap {
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments,
LOperand* length,
@@ -551,11 +573,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddE FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddE(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -570,7 +592,7 @@ class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
@@ -599,7 +621,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddS(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -614,7 +636,7 @@ class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 3> {
public:
LAllocate(LOperand* context,
LOperand* size,
@@ -639,7 +661,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -660,7 +682,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LArgumentsElements(LOperand* temp) {
temps_[0] = temp;
@@ -673,7 +695,7 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -685,7 +707,7 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op,
LOperand* left,
@@ -699,18 +721,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -727,18 +749,18 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -753,7 +775,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
@@ -784,7 +806,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitS(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -801,7 +823,7 @@ class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LBranch FINAL : public LControlInstruction<1, 2> {
public:
explicit LBranch(LOperand* value, LOperand *temp1, LOperand *temp2) {
inputs_[0] = value;
@@ -816,11 +838,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -831,13 +853,13 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -854,7 +876,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -867,13 +889,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -886,13 +908,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -903,7 +925,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -913,7 +935,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -926,7 +948,7 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -941,7 +963,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
inputs_[0] = value;
@@ -956,7 +978,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -969,7 +991,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -981,7 +1003,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -994,7 +1016,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -1006,7 +1028,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -1018,23 +1040,21 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+ LClampTToUint8(LOperand* unclamped, LOperand* temp1) {
inputs_[0] = unclamped;
temps_[0] = temp1;
- temps_[1] = temp2;
}
LOperand* unclamped() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -1047,7 +1067,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -1061,7 +1081,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -1077,11 +1097,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpHoleAndBranchD FINAL : public LControlInstruction<1, 1> {
public:
explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
inputs_[0] = object;
@@ -1096,7 +1116,7 @@ class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranchT FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranchT(LOperand* object) {
inputs_[0] = object;
@@ -1109,7 +1129,7 @@ class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1126,7 +1146,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1141,7 +1161,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1160,7 +1180,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1176,7 +1196,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1195,11 +1215,11 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1208,7 +1228,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1219,7 +1239,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1228,7 +1248,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1237,7 +1257,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1248,14 +1268,14 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1272,13 +1292,13 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1291,15 +1311,15 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -1317,7 +1337,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -1337,7 +1357,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -1354,7 +1374,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToIntOrSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToIntOrSmi(LOperand* value) {
inputs_[0] = value;
@@ -1369,7 +1389,7 @@ class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -1385,7 +1405,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -1399,7 +1419,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1412,7 +1432,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 1> {
public:
LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
@@ -1427,11 +1447,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1445,11 +1465,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1459,13 +1479,13 @@ class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1481,7 +1501,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1500,7 +1520,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1509,7 +1529,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1521,42 +1541,41 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
- const InterfaceDescriptor* descriptor() { return descriptor_; }
+ CallInterfaceDescriptor descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- const InterfaceDescriptor* descriptor_;
+ CallInterfaceDescriptor descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1569,13 +1588,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 2> {
public:
LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
temps_[0] = temp1;
@@ -1590,7 +1609,7 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -1605,11 +1624,11 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1622,11 +1641,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1637,11 +1656,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1655,11 +1674,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1672,11 +1691,11 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() const { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1689,7 +1708,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -1702,7 +1721,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1717,14 +1736,14 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1770,7 +1789,7 @@ class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
uint32_t base_offset() const {
return this->hydrogen()->base_offset();
}
- void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ void PrintDataTo(StringStream* stream) OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
@@ -1824,7 +1843,7 @@ class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* vector) {
@@ -1844,7 +1863,7 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1863,7 +1882,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1872,7 +1891,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1894,13 +1913,13 @@ class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
LOperand* value() { return this->inputs_[0]; }
BuiltinFunctionId op() const { return this->hydrogen()->op(); }
- void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
-class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+class LMathAbs FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
@@ -1930,7 +1949,7 @@ class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
};
-class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+class LMathExp FINAL : public LUnaryMathOperation<4> {
public:
LMathExp(LOperand* value,
LOperand* double_temp1,
@@ -1955,7 +1974,7 @@ class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
// Math.floor with a double result.
-class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFloorD FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
@@ -1963,14 +1982,14 @@ class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
// Math.floor with an integer result.
-class LMathFloorI V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFloorI FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -1989,7 +2008,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -2009,7 +2028,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -2026,21 +2045,21 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+class LMathLog FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
-class LMathClz32 V8_FINAL : public LUnaryMathOperation<0> {
+class LMathClz32 FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2055,7 +2074,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+class LMathPowHalf FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
@@ -2063,7 +2082,7 @@ class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
// Math.round with an integer result.
-class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
+class LMathRoundD FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathRoundD(LOperand* value)
: LUnaryMathOperation<0>(value) {
@@ -2074,7 +2093,7 @@ class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
// Math.round with an integer result.
-class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
+class LMathRoundI FINAL : public LUnaryMathOperation<1> {
public:
LMathRoundI(LOperand* value, LOperand* temp1)
: LUnaryMathOperation<1>(value) {
@@ -2087,7 +2106,7 @@ class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
};
-class LMathFround V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFround FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {}
@@ -2095,14 +2114,14 @@ class LMathFround V8_FINAL : public LUnaryMathOperation<0> {
};
-class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+class LMathSqrt FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -2120,7 +2139,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -2140,7 +2159,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LModI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2155,7 +2174,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulConstIS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulConstIS(LOperand* left, LConstantOperand* right) {
inputs_[0] = left;
@@ -2170,7 +2189,7 @@ class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2185,7 +2204,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulS(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2200,7 +2219,7 @@ class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2217,7 +2236,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
explicit LNumberTagU(LOperand* value,
LOperand* temp1,
@@ -2235,7 +2254,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2251,14 +2270,14 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2273,7 +2292,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LPreparePushArguments FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LPreparePushArguments(int argc) : argc_(argc) {}
@@ -2286,7 +2305,7 @@ class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
+class LPushArguments FINAL : public LTemplateResultInstruction<0> {
public:
explicit LPushArguments(Zone* zone,
int capacity = kRecommendedMaxPushedArgs)
@@ -2312,15 +2331,15 @@ class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2333,7 +2352,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
@@ -2356,7 +2375,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LSeqStringGetChar(LOperand* string,
LOperand* index,
@@ -2375,7 +2394,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 1> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -2400,7 +2419,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2413,7 +2432,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2430,7 +2449,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2480,7 +2499,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
}
uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
- void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ void PrintDataTo(StringStream* stream) OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
@@ -2503,7 +2522,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
};
-class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedExternal FINAL : public LStoreKeyed<1> {
public:
LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
LOperand* temp) :
@@ -2517,7 +2536,7 @@ class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
};
-class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedFixed FINAL : public LStoreKeyed<1> {
public:
LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
LOperand* temp) :
@@ -2531,7 +2550,7 @@ class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
};
-class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedFixedDouble FINAL : public LStoreKeyed<1> {
public:
LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
LOperand* temp) :
@@ -2546,7 +2565,7 @@ class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
@@ -2566,13 +2585,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* object, LOperand* value,
LOperand* temp0, LOperand* temp1) {
@@ -2590,7 +2609,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2598,7 +2617,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL: public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2613,14 +2632,14 @@ class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2638,7 +2657,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2655,7 +2674,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2670,7 +2689,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2688,12 +2707,12 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2712,7 +2731,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -2733,19 +2752,17 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LShiftS FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
- bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+ LShiftS(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
Token::Value op() const { return op_; }
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
@@ -2756,7 +2773,7 @@ class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 1> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object,
LOperand* temp) {
@@ -2769,14 +2786,14 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
LOperand* code_object() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -2793,11 +2810,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2814,7 +2831,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
@@ -2858,14 +2875,14 @@ class LSubS: public LTemplateInstruction<1, 2, 0> {
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2878,7 +2895,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2899,7 +2916,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2910,7 +2927,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
inputs_[0] = object;
@@ -2926,7 +2943,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
};
-class LTruncateDoubleToIntOrSmi V8_FINAL
+class LTruncateDoubleToIntOrSmi FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
@@ -2943,7 +2960,7 @@ class LTruncateDoubleToIntOrSmi V8_FINAL
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2957,7 +2974,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2974,11 +2991,11 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
Handle<String> type_literal() const { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2990,7 +3007,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
inputs_[0] = value;
@@ -3006,7 +3023,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -3049,7 +3066,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -3065,7 +3082,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -3075,17 +3092,13 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
- allocator_(allocator) { }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -3108,27 +3121,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
static bool HasMagicNumberForDivision(int32_t divisor);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const { return info_->isolate(); }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- int argument_count() const { return argument_count_; }
- CompilationInfo* info() const { return info_; }
- Heap* heap() const { return isolate()->heap(); }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -3252,10 +3244,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
LAllocator* allocator_;
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index 53a1cfac42..b9b67d9bbd 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -6,15 +6,18 @@
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -590,11 +593,8 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
@@ -824,28 +824,23 @@ bool LCodeGen::GenerateDeferredCode() {
}
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
Label needs_frame, restore_caller_doubles, call_deopt_entry;
- if (deopt_jump_table_.length() > 0) {
+ if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
- Address base = deopt_jump_table_[0]->address;
+ Address base = jump_table_[0]->address;
UseScratchRegisterScope temps(masm());
Register entry_offset = temps.AcquireX();
- int length = deopt_jump_table_.length();
+ int length = jump_table_.length();
for (int i = 0; i < length; i++) {
- __ Bind(&deopt_jump_table_[i]->label);
+ Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
+ __ Bind(&table_entry->label);
- Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
- Address entry = deopt_jump_table_[i]->address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load the base
@@ -856,7 +851,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// branch.
bool last_entry = (i + 1) == length;
- if (deopt_jump_table_[i]->needs_frame) {
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (!needs_frame.is_bound()) {
// This variant of deopt can only be used with stubs. Since we don't
@@ -938,7 +933,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -994,9 +989,9 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
void LCodeGen::DeoptimizeBranch(
- LEnvironment* environment,
- BranchType branch_type, Register reg, int bit,
- Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, const char* detail, BranchType branch_type,
+ Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
@@ -1045,102 +1040,108 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry* table_entry =
+ new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last()->address != entry) ||
- (deopt_jump_table_.last()->bailout_type != bailout_type) ||
- (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry* table_entry =
- new(zone()) Deoptimizer::JumpTableEntry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry->IsEquivalentTo(*jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ B(&deopt_jump_table_.last()->label,
- branch_type, reg, bit);
+ __ B(&jump_table_.last()->label, branch_type, reg, bit);
}
}
-void LCodeGen::Deoptimize(LEnvironment* environment,
+void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+ DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}
-void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
- DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
}
-void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_zero, rt);
+void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_zero, rt);
}
-void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_not_zero, rt);
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_not_zero, rt);
}
-void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
+ const char* detail) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, environment);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
}
-void LCodeGen::DeoptimizeIfSmi(Register rt,
- LEnvironment* environment) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
}
-void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
}
-void LCodeGen::DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, environment);
+ DeoptimizeIf(eq, instr, detail);
}
-void LCodeGen::DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, environment);
+ DeoptimizeIf(ne, instr, detail);
}
-void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+ const char* detail) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, environment);
+ DeoptimizeIf(vs, instr, detail);
}
-void LCodeGen::DeoptimizeIfBitSet(Register rt,
- int bit,
- LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
+ __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr, "not heap number");
}
-void LCodeGen::DeoptimizeIfBitClear(Register rt,
- int bit,
- LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
}
@@ -1226,17 +1227,7 @@ Operand LCodeGen::ToOperand(LOperand* op) {
}
-Operand LCodeGen::ToOperand32I(LOperand* op) {
- return ToOperand32(op, SIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32U(LOperand* op) {
- return ToOperand32(op, UNSIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+Operand LCodeGen::ToOperand32(LOperand* op) {
DCHECK(op != NULL);
if (op->IsRegister()) {
return Operand(ToRegister32(op));
@@ -1245,10 +1236,7 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return (signedness == SIGNED_INT32)
- ? Operand(constant->Integer32Value())
- : Operand(static_cast<uint32_t>(constant->Integer32Value()));
+ return Operand(constant->Integer32Value());
} else {
// Other constants not implemented.
Abort(kToOperand32UnsupportedImmediate);
@@ -1260,7 +1248,7 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
}
-static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+static int64_t ArgumentsOffsetWithoutFrame(int index) {
DCHECK(index < 0);
return -(index + 1) * kPointerSize;
}
@@ -1314,12 +1302,10 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
}
-template<class LI>
-Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
- IntegerSignedness signedness) {
+template <class LI>
+Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
if (shift_info->shift() == NO_SHIFT) {
- return (signedness == SIGNED_INT32) ? ToOperand32I(right)
- : ToOperand32U(right);
+ return ToOperand32(right);
} else {
return Operand(
ToRegister32(right),
@@ -1396,11 +1382,11 @@ void LCodeGen::EmitBranchGeneric(InstrType instr,
EmitGoto(left_block);
} else if (left_block == next_block) {
branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- branch.Emit(chunk_->GetAssemblyLabel(left_block));
} else {
branch.Emit(chunk_->GetAssemblyLabel(left_block));
- __ B(chunk_->GetAssemblyLabel(right_block));
+ if (right_block != next_block) {
+ __ B(chunk_->GetAssemblyLabel(right_block));
+ }
}
}
@@ -1501,7 +1487,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
} else {
Register length = ToRegister32(instr->length());
- Operand index = ToOperand32I(instr->index());
+ Operand index = ToOperand32(instr->index());
__ Sub(result.W(), length, index);
__ Add(result.W(), result.W(), 1);
__ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
@@ -1525,11 +1511,11 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+ Operand right = ToShiftedRightOperand32(instr->right(), instr);
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1543,7 +1529,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1669,7 +1655,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1796,15 +1782,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32U(instr->right(), instr);
+ Operand right = ToShiftedRightOperand32(instr->right(), instr);
switch (instr->op()) {
case Token::BIT_AND: __ And(result, left, right); break;
@@ -1838,19 +1825,19 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
if (instr->index()->IsConstantOperand()) {
- Operand index = ToOperand32I(instr->index());
+ Operand index = ToOperand32(instr->index());
Register length = ToRegister32(instr->length());
__ Cmp(length, index);
cond = CommuteCondition(cond);
} else {
Register index = ToRegister32(instr->index());
- Operand length = ToOperand32I(instr->length());
+ Operand length = ToOperand32(instr->length());
__ Cmp(index, length);
}
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr->environment());
+ DeoptimizeIf(cond, instr, "out of bounds");
}
}
@@ -1929,7 +1916,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr->environment());
+ DeoptimizeIfSmi(value, instr, "Smi");
}
Register map = NoReg;
@@ -1990,7 +1977,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr->environment());
+ Deoptimize(instr, "unexpected object");
}
}
}
@@ -2052,6 +2039,34 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(x1));
+ DCHECK(name.is(x2));
+
+ Register scratch = x3;
+ Register extra = x4;
+ Register extra2 = x5;
+ Register extra3 = x6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -2147,7 +2162,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr->environment());
+ DeoptimizeIfSmi(temp, instr, "instance migration failed");
}
@@ -2202,7 +2217,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ Bind(&success);
@@ -2211,7 +2226,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
}
}
@@ -2219,7 +2234,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr->environment());
+ DeoptimizeIfNotSmi(value, instr, "not a Smi");
}
@@ -2237,27 +2252,29 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr, "wrong instance type");
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
} else {
uint8_t mask;
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
+ if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
- DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
+ "wrong instance type");
} else {
- DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
+ "wrong instance type");
}
} else {
if (tag == 0) {
@@ -2266,7 +2283,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
@@ -2289,7 +2306,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Register input = ToRegister(instr->unclamped());
Register result = ToRegister32(instr->result());
- Register scratch = ToRegister(instr->temp1());
Label done;
// Both smi and heap number cases are handled.
@@ -2303,19 +2319,18 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for heap number.
Label is_heap_number;
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+ __ JumpIfHeapNumber(input, &is_heap_number);
// Check for undefined. Undefined is coverted to zero for clamping conversion.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined");
__ Mov(result, 0);
__ B(&done);
// Heap number case.
__ Bind(&is_heap_number);
DoubleRegister dbl_scratch = double_scratch();
- DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
__ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
@@ -2358,7 +2373,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
__ JumpIfSmi(input, false_label);
Register map = scratch2;
- if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2383,7 +2398,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
__ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
- if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ JumpIfNotObjectType(
scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
} else {
@@ -2451,8 +2466,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
instr->TrueLabel(chunk()));
} else {
Register value = ToRegister(instr->value());
- __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
__ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
}
@@ -2486,16 +2500,12 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
} else {
if (instr->hydrogen_value()->representation().IsInteger32()) {
if (right->IsConstantOperand()) {
- EmitCompareAndBranch(instr,
- cond,
- ToRegister32(left),
- ToOperand32I(right));
+ EmitCompareAndBranch(instr, cond, ToRegister32(left),
+ ToOperand32(right));
} else {
// Commute the operands and the condition.
- EmitCompareAndBranch(instr,
- CommuteCondition(cond),
- ToRegister32(right),
- ToOperand32I(left));
+ EmitCompareAndBranch(instr, CommuteCondition(cond),
+ ToRegister32(right), ToOperand32(left));
}
} else {
DCHECK(instr->hydrogen_value()->representation().IsSmi());
@@ -2538,7 +2548,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->left()).Is(x1));
DCHECK(ToRegister(instr->right()).Is(x0));
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -2618,7 +2628,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "value mismatch");
}
@@ -2642,9 +2652,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr->environment());
+ DeoptimizeIfSmi(object, instr, "Smi");
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2680,8 +2690,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- Deoptimize(instr->environment(), &type);
+ Deoptimize(instr, instr->hydrogen()->reason(), &type);
}
@@ -2689,27 +2698,27 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr, "division by zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision");
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -2737,14 +2746,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2756,7 +2765,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr->environment());
+ DeoptimizeIfNotZero(temp, instr, "lost precision");
}
}
@@ -2779,7 +2788,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2791,7 +2800,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
@@ -2803,13 +2812,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr->environment());
+ DeoptimizeIfNotZero(remainder, instr, "lost precision");
}
@@ -2818,11 +2827,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr->environment());
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2854,9 +2863,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ Mov(x2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -2884,7 +2892,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr->environment());
+ DeoptimizeIfZero(result, instr, "no cache");
__ Bind(&done);
}
@@ -2897,18 +2905,17 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "null");
- DeoptimizeIfSmi(object, instr->environment());
+ DeoptimizeIfSmi(object, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr, "not a JavaScript object");
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2922,7 +2929,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
__ Bind(&use_cache);
}
@@ -3017,7 +3024,7 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
- __ Add(result, base, ToOperand32I(instr->offset()));
+ __ Add(result, base, ToOperand32(instr->offset()));
} else {
__ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
}
@@ -3315,8 +3322,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3337,8 +3343,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3358,28 +3363,35 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(
- result, Heap::kTheHoleValueRootIndex, instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
+ __ Mov(VectorLoadICDescriptor::SlotRegister(),
+ Smi::FromInt(instr->hydrogen()->slot()));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
- __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
+ __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(x0));
- __ Mov(LoadIC::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3482,7 +3494,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
@@ -3579,7 +3591,7 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
__ Ldr(scratch, mem_op);
__ Cmn(scratch, 1);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "hole");
}
}
@@ -3617,10 +3629,9 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr->environment());
+ DeoptimizeIfNotSmi(result, instr, "not a Smi");
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
}
@@ -3628,19 +3639,13 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(x0));
- __ Mov(LoadIC::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3690,19 +3695,13 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
// LoadIC expects name and receiver in registers.
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(x0));
- __ Mov(LoadIC::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -3734,7 +3733,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -3762,9 +3761,7 @@ void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
Label runtime_allocation;
// Deoptimize if the input is not a HeapNumber.
- __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotHeapNumber(input, instr);
// If the argument is positive, we can return it as-is, without any need to
// allocate a new HeapNumber for the result. We have to do this in integer
@@ -3888,7 +3885,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr->environment());
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ Fcvtms(result, input);
@@ -3898,7 +3895,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
}
@@ -3924,13 +3921,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
@@ -3953,14 +3950,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -4003,14 +4000,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr, "division by zero");
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "overflow");
}
// Check for (0 / -x) that will produce negative zero.
@@ -4020,7 +4017,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
Label done;
@@ -4081,11 +4078,14 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+ Register integer_exponent = MathPowIntegerDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d1));
DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(x11));
- DCHECK(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(!exponent_type.IsInteger32() ||
+ ToRegister(instr->right()).is(integer_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(d0));
DCHECK(ToDoubleRegister(instr->result()).is(d0));
@@ -4094,18 +4094,15 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(x11, &no_deopt);
- __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
__ Bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
// Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
// supports large integer exponents.
- Register exponent = ToRegister(instr->right());
- __ Sxtw(exponent, exponent);
+ __ Sxtw(integer_exponent, integer_exponent);
MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
@@ -4179,18 +4176,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr, "overflow");
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr->environment());
+ DeoptimizeIfNegative(result, instr, "minus zero");
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "NaN");
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4220,7 +4217,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
if (instr->hydrogen()->representation().IsInteger32()) {
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToOperand32I(instr->right());
+ Operand right = ToOperand32(instr->right());
__ Cmp(left, right);
__ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
@@ -4268,7 +4265,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ B(&done);
}
@@ -4287,7 +4284,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr, "division by zero");
return;
}
@@ -4301,7 +4298,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr->environment());
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -4316,12 +4313,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr->environment());
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
}
__ Bind(&done);
}
@@ -4344,10 +4341,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr->environment());
+ DeoptimizeIfZero(left, instr, "minus zero");
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr, "minus zero");
}
}
@@ -4357,7 +4354,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, left);
}
@@ -4373,7 +4370,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, left);
}
@@ -4384,7 +4381,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// can be done efficiently with shifted operands.
int32_t right_abs = Abs(right);
- if (IsPowerOf2(right_abs)) {
+ if (base::bits::IsPowerOfTwo32(right_abs)) {
int right_log2 = WhichPowerOf2(right_abs);
if (can_overflow) {
@@ -4392,7 +4389,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr, "overflow");
}
if (right >= 0) {
@@ -4402,7 +4399,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4417,10 +4414,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!can_overflow);
if (right >= 0) {
- if (IsPowerOf2(right - 1)) {
+ if (base::bits::IsPowerOfTwo32(right - 1)) {
// result = left + left << log2(right - 1)
__ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
- } else if (IsPowerOf2(right + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(right + 1)) {
// result = -left + left << log2(right + 1)
__ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
__ Neg(result, result);
@@ -4428,10 +4425,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
UNREACHABLE();
}
} else {
- if (IsPowerOf2(-right + 1)) {
+ if (base::bits::IsPowerOfTwo32(-right + 1)) {
// result = left - left << log2(-right + 1)
__ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
- } else if (IsPowerOf2(-right - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
// result = -left - left << log2(-right - 1)
__ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
__ Neg(result, result);
@@ -4460,13 +4457,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "overflow");
} else {
__ Mul(result, left, right);
}
@@ -4490,7 +4487,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr, "minus zero");
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4498,7 +4495,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4665,26 +4662,23 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Label convert_undefined;
// Heap number map check.
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
if (can_convert_undefined_to_nan) {
- __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
- &convert_undefined);
+ __ JumpIfNotHeapNumber(input, &convert_undefined);
} else {
- DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotHeapNumber(input, instr);
}
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr->environment());
+ DeoptimizeIfMinusZero(result, instr, "minus zero");
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4877,7 +4871,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr->environment());
+ DeoptimizeIfNegative(input.W(), instr, "overflow");
}
__ SmiTag(output, input);
}
@@ -4889,7 +4883,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr->environment());
+ DeoptimizeIfNotSmi(input, instr, "not a Smi");
}
__ Bind(&untag);
@@ -4910,13 +4904,12 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SAR: __ Asr(result, left, right); break;
case Token::SHL: __ Lsl(result, left, right); break;
case Token::SHR:
+ __ Lsr(result, left, right);
if (instr->can_deopt()) {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- DeoptimizeIfNegative(left, instr->environment());
- __ Bind(&right_not_zero);
+ // If `left >>> right` >= 0x80000000, the result is not representable
+ // in a signed 32-bit smi.
+ DeoptimizeIfNegative(result, instr, "negative value");
}
- __ Lsr(result, left, right);
break;
default: UNREACHABLE();
}
@@ -4925,7 +4918,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -4946,40 +4939,40 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
- // Only ROR by register needs a temp.
- DCHECK(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
- (instr->temp() == NULL));
-
if (right_op->IsRegister()) {
Register right = ToRegister(instr->right());
+
+ // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
+ // Since we're handling smis in X registers, we have to extract these bits
+ // explicitly.
+ __ Ubfx(result, right, kSmiShift, 5);
+
switch (instr->op()) {
case Token::ROR: {
- Register temp = ToRegister(instr->temp());
- __ Ubfx(temp, right, kSmiShift, 5);
- __ SmiUntag(result, left);
- __ Ror(result.W(), result.W(), temp.W());
+ // This is the only case that needs a scratch register. To keep things
+ // simple for the other cases, borrow a MacroAssembler scratch register.
+ UseScratchRegisterScope temps(masm());
+ Register temp = temps.AcquireW();
+ __ SmiUntag(temp, left);
+ __ Ror(result.W(), temp.W(), result.W());
__ SmiTag(result);
break;
}
case Token::SAR:
- __ Ubfx(result, right, kSmiShift, 5);
__ Asr(result, left, result);
__ Bic(result, result, kSmiShiftMask);
break;
case Token::SHL:
- __ Ubfx(result, right, kSmiShift, 5);
__ Lsl(result, left, result);
break;
case Token::SHR:
- if (instr->can_deopt()) {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- DeoptimizeIfNegative(left, instr->environment());
- __ Bind(&right_not_zero);
- }
- __ Ubfx(result, right, kSmiShift, 5);
__ Lsr(result, left, result);
__ Bic(result, result, kSmiShiftMask);
+ if (instr->can_deopt()) {
+ // If `left >>> right` >= 0x80000000, the result is not representable
+ // in a signed 32-bit smi.
+ DeoptimizeIfNegative(result, instr, "negative value");
+ }
break;
default: UNREACHABLE();
}
@@ -4988,7 +4981,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left);
} else {
@@ -5117,8 +5110,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -5156,8 +5148,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(
- payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
}
// Store the value.
@@ -5334,13 +5325,12 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5441,10 +5431,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ Mov(StoreIC::NameRegister(), Operand(instr->name()));
+ __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5557,7 +5547,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -5571,11 +5561,11 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
Register result = ToRegister32(instr->result());
Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+ Operand right = ToShiftedRightOperand32(instr->right(), instr);
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5589,7 +5579,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5606,15 +5596,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
Label done;
- // Load heap object map.
- __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
-
if (instr->truncating()) {
Register output = ToRegister(instr->result());
Label check_bools;
// If it's not a heap number, jump to undefined check.
- __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+ __ JumpIfNotHeapNumber(input, &check_bools);
// A heap number: load value and convert to int32 using truncating function.
__ TruncateHeapNumberToI(output, input);
@@ -5632,28 +5619,25 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+ "not a heap number/undefined/true/false");
} else {
Register output = ToRegister32(instr->result());
-
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
- // Deoptimized if it's not a heap number.
- DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotHeapNumber(input, instr);
// A heap number: load value and convert to int32 using non-truncating
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr->environment());
+ DeoptimizeIfNegative(scratch1, instr, "minus zero");
}
}
__ Bind(&done);
@@ -5794,7 +5778,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr, "memento found");
__ Bind(&no_memento_found);
}
@@ -5824,13 +5808,22 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Factory* factory = isolate()->factory();
if (String::Equals(type_name, factory->number_string())) {
- DCHECK(instr->temp1() != NULL);
- Register map = ToRegister(instr->temp1());
-
__ JumpIfSmi(value, true_label);
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq);
+
+ int true_block = instr->TrueDestination(chunk_);
+ int false_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ if (true_block == false_block) {
+ EmitGoto(true_block);
+ } else if (true_block == next_block) {
+ __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
+ if (false_block != next_block) {
+ __ B(chunk_->GetAssemblyLabel(false_block));
+ }
+ }
} else if (String::Equals(type_name, factory->string_string())) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
@@ -5910,7 +5903,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "wrong map");
}
@@ -5944,10 +5937,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr->environment());
+ DeoptimizeIfSmi(receiver, instr, "Smi");
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr->environment());
+ Deoptimize(instr, "not a JavaScript object");
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
@@ -5977,7 +5970,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5990,10 +5983,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index bb06f483af..a73bb8caaf 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -27,7 +27,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -83,31 +83,17 @@ class LCodeGen: public LCodeGenBase {
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
// Support for converting LOperands to assembler types.
- // LOperand must be a register.
Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
- Operand ToOperand32I(LOperand* op);
- Operand ToOperand32U(LOperand* op);
+ Operand ToOperand32(LOperand* op);
enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
- template<class LI>
- Operand ToShiftedRightOperand32I(LOperand* right,
- LI* shift_info) {
- return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
- }
- template<class LI>
- Operand ToShiftedRightOperand32U(LOperand* right,
- LI* shift_info) {
- return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
- }
- template<class LI>
- Operand ToShiftedRightOperand32(LOperand* right,
- LI* shift_info,
- IntegerSignedness signedness);
+ template <class LI>
+ Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
int JSShiftAmountFromLConstant(LOperand* constant) {
return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
@@ -158,8 +144,6 @@ class LCodeGen: public LCodeGenBase {
Register object,
Register index);
- Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void DoGap(LGap* instr);
@@ -212,6 +196,9 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -226,27 +213,31 @@ class LCodeGen: public LCodeGenBase {
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(
- LEnvironment* environment,
- BranchType branch_type, Register reg = NoReg, int bit = -1,
- Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LEnvironment* environment,
+ void DeoptimizeBranch(LInstruction* instr, const char* detail,
+ BranchType branch_type, Register reg = NoReg,
+ int bit = -1,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void DeoptimizeIf(Condition cond, LEnvironment* environment);
- void DeoptimizeIfZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
- void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
- void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
- void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+ void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
+ void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfNegative(Register rt, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
+ void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+ const char* detail);
+ void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+ const char* detail);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
@@ -286,10 +277,10 @@ class LCodeGen: public LCodeGenBase {
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -338,7 +329,7 @@ class LCodeGen: public LCodeGenBase {
Register function_reg = NoReg);
// Support for recording safepoint and position information.
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -351,10 +342,10 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index f7c724842a..23767e48b3 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -13,6 +13,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
+#include "src/base/bits.h"
namespace v8 {
@@ -1520,7 +1521,7 @@ void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- DCHECK(IsPowerOf2(unit_size));
+ DCHECK(base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1538,7 +1539,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || IsPowerOf2(unit_size));
+ DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
@@ -1578,7 +1579,7 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- DCHECK(IsPowerOf2(unit_size));
+ DCHECK(base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1599,7 +1600,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || IsPowerOf2(unit_size));
+ DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 658497b9f7..3d6709777f 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -6,12 +6,14 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -1656,12 +1658,6 @@ void MacroAssembler::ThrowUncatchable(Register value,
}
-void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
- DCHECK(smi.Is64Bits());
- Abs(smi, smi, slow);
-}
-
-
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2059,7 +2055,7 @@ void MacroAssembler::CallCFunction(Register function,
int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
- DCHECK(IsPowerOf2(sp_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved
// across the call.
@@ -2251,58 +2247,38 @@ int MacroAssembler::CallSize(Handle<Code> code,
}
+void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
+ SmiCheckType smi_check_type) {
+ Label on_not_heap_number;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(object, &on_not_heap_number);
+ }
-
-void MacroAssembler::JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number) {
- DCHECK(on_heap_number || on_not_heap_number);
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
-
- // Load the HeapNumber map if it is not passed.
- if (heap_number_map.Is(NoReg)) {
- heap_number_map = temps.AcquireX();
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- } else {
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- }
-
- DCHECK(!AreAliased(temp, heap_number_map));
-
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- Cmp(temp, heap_number_map);
+ JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
- if (on_heap_number) {
- B(eq, on_heap_number);
- }
- if (on_not_heap_number) {
- B(ne, on_not_heap_number);
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- on_heap_number,
- NULL);
+ Bind(&on_not_heap_number);
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Label* on_not_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- NULL,
- on_not_heap_number);
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(object, on_not_heap_number);
+ }
+
+ AssertNotSmi(object);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
@@ -2336,8 +2312,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
Label load_result_from_cache;
JumpIfSmi(object, &is_smi);
- CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
- DONT_DO_SMI_CHECK);
+ JumpIfNotHeapNumber(object, not_found);
STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -2699,14 +2674,9 @@ void MacroAssembler::FillFields(Register dst,
}
-void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check) {
-
+void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure, SmiCheckType smi_check) {
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
@@ -2721,73 +2691,64 @@ void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
Bind(&not_smi);
}
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2));
- static const int kFlatAsciiStringMask =
+ static const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
+ And(scratch1, first, kFlatOneByteStringMask);
+ And(scratch2, second, kFlatOneByteStringMask);
+ Cmp(scratch1, kFlatOneByteStringTag);
+ Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, kFlatAsciiStringMask);
- Cmp(scratch, kFlatAsciiStringTag);
+ And(scratch, type, kFlatOneByteStringMask);
+ Cmp(scratch, kFlatOneByteStringTag);
B(ne, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
DCHECK(!AreAliased(first, second, scratch1, scratch2));
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ And(scratch1, first, kFlatOneByteStringMask);
+ And(scratch2, second, kFlatOneByteStringMask);
+ Cmp(scratch1, kFlatOneByteStringTag);
+ Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
-void MacroAssembler::JumpIfNotUniqueName(Register type,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
+ Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
// continue
@@ -3013,12 +2974,22 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result,
void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) {
Label done;
- DCHECK(jssp.Is(StackPointer()));
// Try to convert the double to an int64. If successful, the bottom 32 bits
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
+ const Register old_stack_pointer = StackPointer();
+ if (csp.Is(old_stack_pointer)) {
+ // This currently only happens during compiler-unittest. If it arises
+ // during regular code generation the DoubleToI stub should be updated to
+ // cope with csp and have an extra parameter indicating which stack pointer
+ // it should use.
+ Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
+ Mov(jssp, csp);
+ SetStackPointer(jssp);
+ }
+
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
@@ -3030,8 +3001,15 @@ void MacroAssembler::TruncateDoubleToI(Register result,
true); // skip_fastpath
CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
- Drop(1, kDoubleSize); // Drop the double input on the stack.
- Pop(lr);
+ DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
+ Pop(xzr, lr); // xzr to drop the double input on the stack.
+
+ if (csp.Is(old_stack_pointer)) {
+ Mov(csp, jssp);
+ SetStackPointer(csp);
+ AssertStackConsistency();
+ Pop(xzr, jssp);
+ }
Bind(&done);
}
@@ -3556,12 +3534,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
@@ -3570,7 +3546,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
@@ -3579,11 +3555,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3603,11 +3576,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -3615,11 +3587,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3640,20 +3609,17 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3754,9 +3720,16 @@ void MacroAssembler::CompareInstanceType(Register map,
}
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map) {
+void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register obj_map = temps.AcquireX();
+ Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareRoot(obj_map, index);
+}
+
+
+void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
+ Handle<Map> map) {
Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMap(scratch, map);
}
@@ -3777,7 +3750,7 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- CompareMap(obj, scratch, map);
+ CompareObjectMap(obj, scratch, map);
B(ne, fail);
}
@@ -4017,8 +3990,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
JumpIfSmi(value_reg, &store_num);
// Ensure that the object is a heap number.
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
- fail, DONT_DO_SMI_CHECK);
+ JumpIfNotHeapNumber(value_reg, fail);
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
@@ -4284,8 +4256,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Bind(&store_buffer_overflow);
Push(lr);
- StoreBufferOverflowStub store_buffer_overflow_stub =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
CallStub(&store_buffer_overflow_stub);
Pop(lr);
@@ -4424,8 +4395,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
- Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
}
@@ -4444,7 +4415,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareMap(map, temp, isolate()->factory()->meta_map());
+ CompareObjectMap(map, temp, isolate()->factory()->meta_map());
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
@@ -4496,8 +4467,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
- Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+ Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
@@ -4569,8 +4540,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
@@ -4775,8 +4746,8 @@ void MacroAssembler::EnsureNotWhite(
Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@@ -5315,13 +5286,15 @@ void MacroAssembler::TruncatingDiv(Register result,
int32_t divisor) {
DCHECK(!AreAliased(result, dividend));
DCHECK(result.Is32Bits() && dividend.Is32Bits());
- MultiplierAndShift ms(divisor);
- Mov(result, ms.multiplier());
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ Mov(result, mag.multiplier);
Smull(result.X(), dividend, result);
Asr(result.X(), result.X(), 32);
- if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
- if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
- if (ms.shift() > 0) Asr(result, result, ms.shift());
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) Add(result, result, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
+ if (mag.shift > 0) Asr(result, result, mag.shift);
Add(result, result, Operand(dividend, LSR, 31));
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index aa83c7040f..7a106a18b9 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -7,9 +7,11 @@
#include <vector>
+#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/bits.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -808,7 +810,7 @@ class MacroAssembler : public Assembler {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
- DCHECK(IsPowerOf2(sp_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
@@ -909,11 +911,6 @@ class MacroAssembler : public Assembler {
inline void SmiTagAndPush(Register src);
inline void SmiTagAndPush(Register src1, Register src2);
- // Compute the absolute value of 'smi' and leave the result in 'smi'
- // register. If 'smi' is the most negative SMI, the absolute value cannot
- // be represented as a SMI and a jump to 'slow' is done.
- void SmiAbs(const Register& smi, Label* slow);
-
inline void JumpIfSmi(Register value,
Label* smi_label,
Label* not_smi_label = NULL);
@@ -950,16 +947,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
- void JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number = NULL);
- void JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map = NoReg);
- void JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- Register heap_number_map = NoReg);
+ void JumpIfHeapNumber(Register object, Label* on_heap_number,
+ SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
+ void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
+ SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
// Sets the vs flag if the input is -0.0.
void TestForMinusZero(DoubleRegister input);
@@ -1055,41 +1046,30 @@ class MacroAssembler : public Assembler {
// ---- String Utilities ----
- // Jump to label if either object is not a sequential ASCII string.
+ // Jump to label if either object is not a sequential one-byte string.
// Optionally perform a smi check on the objects first.
- void JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check = DO_SMI_CHECK);
+ void JumpIfEitherIsNotSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
- // Check if instance type is sequential ASCII string and jump to label if
+ // Check if instance type is sequential one-byte string and jump to label if
// it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+ void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ----
@@ -1369,32 +1349,25 @@ class MacroAssembler : public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
@@ -1470,9 +1443,11 @@ class MacroAssembler : public Assembler {
// Compare an object's map with the specified map. Condition flags are set
// with result of map compare.
- void CompareMap(Register obj,
- Register scratch,
- Handle<Map> map);
+ void CompareObjectMap(Register obj, Heap::RootListIndex index);
+
+ // Compare an object's map with the specified map. Condition flags are set
+ // with result of map compare.
+ void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
index 432d9568bd..e9a485d090 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -260,7 +260,7 @@ void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
}
for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
DCHECK(str[i] <= String::kMaxOneByteCharCode);
} else {
@@ -307,7 +307,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Cmn(capture_length, current_input_offset());
BranchOrBacktrack(gt, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_check;
@@ -447,7 +447,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
Label loop;
__ Bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
__ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
} else {
@@ -530,7 +530,7 @@ void RegExpMacroAssemblerARM64::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ Mov(x11, Operand(table));
- if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+ if ((mode_ != LATIN1) || (kTableMask != String::kMaxOneByteCharCode)) {
__ And(w10, current_character(), kTableMask);
__ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
} else {
@@ -548,7 +548,7 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
// Check for ' ' or 0x00a0.
@@ -611,8 +611,8 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
}
ExternalReference map = ExternalReference::re_word_character_map();
@@ -623,8 +623,8 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ Cmp(current_character(), 'z');
__ B(hi, &done);
}
@@ -1315,7 +1315,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInput));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1346,8 +1346,8 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between an Latin1 and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1675,7 +1675,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
offset = w10;
}
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
} else if (characters == 2) {
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
index a27cff0566..632c513643 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -265,7 +265,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index cde93db98e..129252b49b 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -30,30 +30,28 @@ namespace internal {
// Helpers for colors.
-// Depending on your terminal configuration, the colour names may not match the
-// observed colours.
-#define COLOUR(colour_code) "\033[" colour_code "m"
-#define BOLD(colour_code) "1;" colour_code
-#define NORMAL ""
-#define GREY "30"
-#define GREEN "32"
-#define ORANGE "33"
-#define BLUE "34"
-#define PURPLE "35"
-#define INDIGO "36"
-#define WHITE "37"
+#define COLOUR(colour_code) "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
+#define NORMAL ""
+#define GREY "30"
+#define RED "31"
+#define GREEN "32"
+#define YELLOW "33"
+#define BLUE "34"
+#define MAGENTA "35"
+#define CYAN "36"
+#define WHITE "37"
typedef char const * const TEXT_COLOUR;
TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
-TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : "";
-TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : "";
-TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : "";
-TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
-TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
-TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
-TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : "";
-TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : "";
-TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
-TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
+TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
+TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
+TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
+TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
+TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
+TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
+TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
@@ -337,7 +335,7 @@ uintptr_t Simulator::PopAddress() {
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
+ return stack_limit_ + 1024;
}
@@ -380,11 +378,11 @@ void Simulator::Init(FILE* stream) {
// Allocate and setup the simulator stack.
stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
- stack_ = new byte[stack_size_];
+ stack_ = reinterpret_cast<uintptr_t>(new byte[stack_size_]);
stack_limit_ = stack_ + stack_protection_size_;
- byte* tos = stack_ + stack_size_ - stack_protection_size_;
- // The stack pointer must be 16 bytes aligned.
- set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+ uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
+ // The stack pointer must be 16-byte aligned.
+ set_sp(tos & ~0xfUL);
stream_ = stream;
print_disasm_ = new PrintDisassembler(stream_);
@@ -420,7 +418,7 @@ void Simulator::ResetState() {
Simulator::~Simulator() {
- delete[] stack_;
+ delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) {
delete instrument_;
}
@@ -704,7 +702,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::PROFILING_GETTER_CALL: {
// void f(Local<String> property, PropertyCallbackInfo& info,
- // AccessorGetterCallback callback)
+ // AccessorNameGetterCallback callback)
TraceSim("Type: PROFILING_GETTER_CALL\n");
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
@@ -734,15 +732,15 @@ void* Simulator::RedirectExternalReference(void* external_function,
const char* Simulator::xreg_names[] = {
-"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
-"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
-"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
-"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
const char* Simulator::wreg_names[] = {
-"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
-"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
-"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
const char* Simulator::sreg_names[] = {
@@ -765,7 +763,12 @@ const char* Simulator::vreg_names[] = {
const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1));
DCHECK(code < kNumberOfRegisters);
+ // The modulo operator has no effect here, but it silences a broken GCC
+ // warning about out-of-bounds array accesses.
+ code %= kNumberOfRegisters;
+
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@@ -775,7 +778,10 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1));
DCHECK(code < kNumberOfRegisters);
+ code %= kNumberOfRegisters;
+
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@@ -785,20 +791,23 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::SRegNameForCode(unsigned code) {
+ STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
- return sreg_names[code];
+ return sreg_names[code % kNumberOfFPRegisters];
}
const char* Simulator::DRegNameForCode(unsigned code) {
+ STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
- return dreg_names[code];
+ return dreg_names[code % kNumberOfFPRegisters];
}
const char* Simulator::VRegNameForCode(unsigned code) {
+ STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters);
- return vreg_names[code];
+ return vreg_names[code % kNumberOfFPRegisters];
}
@@ -855,6 +864,7 @@ T Simulator::AddWithCarry(bool set_flags,
nzcv().SetZ(Z);
nzcv().SetC(C);
nzcv().SetV(V);
+ LogSystemRegister(NZCV);
}
return result;
}
@@ -978,6 +988,7 @@ void Simulator::FPCompare(double val0, double val1) {
} else {
UNREACHABLE();
}
+ LogSystemRegister(NZCV);
}
@@ -1044,118 +1055,206 @@ void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
}
-void Simulator::PrintSystemRegisters(bool print_all) {
- static bool first_run = true;
+void Simulator::PrintSystemRegisters() {
+ PrintSystemRegister(NZCV);
+ PrintSystemRegister(FPCR);
+}
+
+
+void Simulator::PrintRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ PrintRegister(i);
+ }
+}
+
- static SimSystemRegister last_nzcv;
- if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
- fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
- clr_flag_name,
- clr_flag_value,
- nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
- clr_normal);
+void Simulator::PrintFPRegisters() {
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ PrintFPRegister(i);
}
- last_nzcv = nzcv();
+}
+
- static SimSystemRegister last_fpcr;
- if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
- static const char * rmode[] = {
- "0b00 (Round to Nearest)",
- "0b01 (Round towards Plus Infinity)",
- "0b10 (Round towards Minus Infinity)",
- "0b11 (Round towards Zero)"
- };
- DCHECK(fpcr().RMode() < ARRAY_SIZE(rmode));
- fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
- clr_flag_name,
- clr_flag_value,
- fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
- clr_normal);
+void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+ // Don't print writes into xzr.
+ if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
+ return;
}
- last_fpcr = fpcr();
- first_run = false;
+ // The template is "# x<code>:value".
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n",
+ clr_reg_name, XRegNameForCode(code, r31mode),
+ clr_reg_value, reg<uint64_t>(code, r31mode), clr_normal);
}
-void Simulator::PrintRegisters(bool print_all_regs) {
- static bool first_run = true;
- static int64_t last_regs[kNumberOfRegisters];
+void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) {
+ // The template is "# v<code>:bits (d<code>:value, ...)".
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- if (print_all_regs || first_run ||
- (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ DCHECK(sizes != 0);
+ DCHECK((sizes & kPrintAllFPRegValues) == sizes);
+
+ // Print the raw bits.
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (",
+ clr_fpreg_name, VRegNameForCode(code),
+ clr_fpreg_value, fpreg<uint64_t>(code), clr_normal);
+
+ // Print all requested value interpretations.
+ bool need_separator = false;
+ if (sizes & kPrintDRegValue) {
+ fprintf(stream_, "%s%s%s: %s%g%s",
+ need_separator ? ", " : "",
+ clr_fpreg_name, DRegNameForCode(code),
+ clr_fpreg_value, fpreg<double>(code), clr_normal);
+ need_separator = true;
+ }
+
+ if (sizes & kPrintSRegValue) {
+ fprintf(stream_, "%s%s%s: %s%g%s",
+ need_separator ? ", " : "",
+ clr_fpreg_name, SRegNameForCode(code),
+ clr_fpreg_value, fpreg<float>(code), clr_normal);
+ need_separator = true;
+ }
+
+ // End the value list.
+ fprintf(stream_, ")\n");
+}
+
+
+void Simulator::PrintSystemRegister(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name, clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ break;
+ case FPCR: {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ DCHECK(fpcr().RMode() < arraysize(rmode));
fprintf(stream_,
- "# %s%4s:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name,
- XRegNameForCode(i, Reg31IsStackPointer),
- clr_reg_value,
- xreg(i, Reg31IsStackPointer),
+ "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name, clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
clr_normal);
+ break;
}
- // Cache the new register value so the next run can detect any changes.
- last_regs[i] = xreg(i, Reg31IsStackPointer);
+ default:
+ UNREACHABLE();
}
- first_run = false;
}
-void Simulator::PrintFPRegisters(bool print_all_regs) {
- static bool first_run = true;
- static uint64_t last_regs[kNumberOfFPRegisters];
+void Simulator::PrintRead(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ USE(size); // Size is unused here.
- // Print as many rows of registers as necessary, keeping each individual
- // register in the same column each time (to make it easy to visually scan
- // for changes).
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
- if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
- fprintf(stream_,
- "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
- clr_fpreg_name,
- VRegNameForCode(i),
- clr_fpreg_value,
- dreg_bits(i),
- clr_normal,
- clr_fpreg_name,
- DRegNameForCode(i),
- clr_fpreg_value,
- dreg(i),
- clr_fpreg_name,
- SRegNameForCode(i),
- clr_fpreg_value,
- sreg(i),
- clr_normal);
- }
- // Cache the new register value so the next run can detect any changes.
- last_regs[i] = dreg_bits(i);
+ // The template is "# x<code>:value <- address".
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
+ clr_reg_name, XRegNameForCode(reg_code),
+ clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintReadFP(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ // The template is "# reg:bits (reg:value) <- address".
+ switch (size) {
+ case kSRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+ clr_fpreg_name, SRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
+ break;
+ case kDRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+ clr_fpreg_name, DRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
+ break;
+ default:
+ UNREACHABLE();
}
- first_run = false;
+
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
}
-void Simulator::PrintProcessorState() {
- PrintSystemRegisters();
- PrintRegisters();
- PrintFPRegisters();
+void Simulator::PrintWrite(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ // The template is "# reg:value -> address". To keep the trace tidy and
+ // readable, the value is aligned with the values in the register trace.
+ switch (size) {
+ case kByteSizeInBytes:
+ fprintf(stream_, "# %s%5s<7:0>: %s0x%02" PRIx8 "%s",
+ clr_reg_name, WRegNameForCode(reg_code),
+ clr_reg_value, reg<uint8_t>(reg_code), clr_normal);
+ break;
+ case kHalfWordSizeInBytes:
+ fprintf(stream_, "# %s%5s<15:0>: %s0x%04" PRIx16 "%s",
+ clr_reg_name, WRegNameForCode(reg_code),
+ clr_reg_value, reg<uint16_t>(reg_code), clr_normal);
+ break;
+ case kWRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%08" PRIx32 "%s",
+ clr_reg_name, WRegNameForCode(reg_code),
+ clr_reg_value, reg<uint32_t>(reg_code), clr_normal);
+ break;
+ case kXRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
+ clr_reg_name, XRegNameForCode(reg_code),
+ clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
}
-void Simulator::PrintWrite(uint8_t* address,
- uint64_t value,
- unsigned num_bytes) {
- // The template is "# value -> address". The template is not directly used
- // in the printf since compilers tend to struggle with the parametrized
- // width (%0*).
- const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
- fprintf(stream_,
- format,
- clr_memory_value,
- num_bytes * 2, // The width in hexa characters.
- value,
- clr_normal,
- clr_memory_address,
- address,
- clr_normal);
+void Simulator::PrintWriteFP(uintptr_t address,
+ size_t size,
+ unsigned reg_code) {
+ // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy
+ // and readable, the value is aligned with the values in the register trace.
+ switch (size) {
+ case kSRegSize:
+ fprintf(stream_, "# %s%5s<31:0>: %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint32_t>(reg_code), clr_normal,
+ clr_fpreg_name, SRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
+ break;
+ case kDRegSize:
+ fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
+ clr_fpreg_name, VRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+ clr_fpreg_name, DRegNameForCode(reg_code),
+ clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
}
@@ -1384,6 +1483,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
nzcv().SetZ(CalcZFlag(result));
nzcv().SetC(0);
nzcv().SetV(0);
+ LogSystemRegister(NZCV);
}
set_reg<T>(instr->Rd(), result, instr->RdMode());
@@ -1424,6 +1524,7 @@ void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
} else {
// If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
}
}
@@ -1464,9 +1565,8 @@ void Simulator::LoadStoreHelper(Instruction* instr,
AddrMode addrmode) {
unsigned srcdst = instr->Rt();
unsigned addr_reg = instr->Rn();
- uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
- int num_bytes = 1 << instr->SizeLS();
- uint8_t* stack = NULL;
+ uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uintptr_t stack = 0;
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
@@ -1480,44 +1580,50 @@ void Simulator::LoadStoreHelper(Instruction* instr,
// For store the address post writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
}
LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
switch (op) {
- case LDRB_w:
- case LDRH_w:
- case LDR_w:
- case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
- case STRB_w:
- case STRH_w:
- case STR_w:
- case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
- case LDRSB_w: {
- set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB));
- break;
- }
- case LDRSB_x: {
- set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB));
- break;
- }
- case LDRSH_w: {
- set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH));
- break;
- }
- case LDRSH_x: {
- set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH));
- break;
+ // Use _no_log variants to suppress the register trace (LOG_REGS,
+ // LOG_FP_REGS). We will print a more detailed log.
+ case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
+ case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
+ case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
+ case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
+ case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
+ case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
+ case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
+ case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
+ case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
+ case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
+ case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
+
+ case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
+ case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
+ case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
+ case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
+ case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
+ case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
+
+ default: UNIMPLEMENTED();
+ }
+
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ size_t access_size = 1 << instr->SizeLS();
+ if (instr->IsLoad()) {
+ if ((op == LDR_s) || (op == LDR_d)) {
+ LogReadFP(address, access_size, srcdst);
+ } else {
+ LogRead(address, access_size, srcdst);
}
- case LDRSW_x: {
- set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
- break;
+ } else {
+ if ((op == STR_s) || (op == STR_d)) {
+ LogWriteFP(address, access_size, srcdst);
+ } else {
+ LogWrite(address, access_size, srcdst);
}
- case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
- case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
- case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
- case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
- default: UNIMPLEMENTED();
}
// Handle the writeback for loads after the load to ensure safe pop
@@ -1527,7 +1633,7 @@ void Simulator::LoadStoreHelper(Instruction* instr,
if (instr->IsLoad()) {
// For loads the address pre writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
LoadStoreWriteBack(addr_reg, offset, addrmode);
}
@@ -1563,9 +1669,11 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
unsigned rt = instr->Rt();
unsigned rt2 = instr->Rt2();
unsigned addr_reg = instr->Rn();
- int offset = instr->ImmLSPair() << instr->SizeLSPair();
- uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
- uint8_t* stack = NULL;
+ size_t access_size = 1 << instr->SizeLSPair();
+ int64_t offset = instr->ImmLSPair() * access_size;
+ uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uintptr_t address2 = address + access_size;
+ uintptr_t stack = 0;
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
@@ -1579,7 +1687,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
// For store the address post writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
}
LoadStorePairOp op =
@@ -1589,55 +1697,85 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
switch (op) {
+ // Use _no_log variants to suppress the register trace (LOG_REGS,
+ // LOG_FP_REGS). We will print a more detailed log.
case LDP_w: {
- set_wreg(rt, MemoryRead32(address));
- set_wreg(rt2, MemoryRead32(address + kWRegSize));
+ DCHECK(access_size == kWRegSize);
+ set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+ set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
break;
}
case LDP_s: {
- set_sreg(rt, MemoryReadFP32(address));
- set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
+ DCHECK(access_size == kSRegSize);
+ set_sreg_no_log(rt, MemoryRead<float>(address));
+ set_sreg_no_log(rt2, MemoryRead<float>(address2));
break;
}
case LDP_x: {
- set_xreg(rt, MemoryRead64(address));
- set_xreg(rt2, MemoryRead64(address + kXRegSize));
+ DCHECK(access_size == kXRegSize);
+ set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+ set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
break;
}
case LDP_d: {
- set_dreg(rt, MemoryReadFP64(address));
- set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
+ DCHECK(access_size == kDRegSize);
+ set_dreg_no_log(rt, MemoryRead<double>(address));
+ set_dreg_no_log(rt2, MemoryRead<double>(address2));
break;
}
case LDPSW_x: {
- set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
- set_xreg(rt2, ExtendValue<int64_t>(
- MemoryRead32(address + kWRegSize), SXTW));
+ DCHECK(access_size == kWRegSize);
+ set_xreg_no_log(rt, MemoryRead<int32_t>(address));
+ set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
break;
}
case STP_w: {
- MemoryWrite32(address, wreg(rt));
- MemoryWrite32(address + kWRegSize, wreg(rt2));
+ DCHECK(access_size == kWRegSize);
+ MemoryWrite<uint32_t>(address, wreg(rt));
+ MemoryWrite<uint32_t>(address2, wreg(rt2));
break;
}
case STP_s: {
- MemoryWriteFP32(address, sreg(rt));
- MemoryWriteFP32(address + kSRegSize, sreg(rt2));
+ DCHECK(access_size == kSRegSize);
+ MemoryWrite<float>(address, sreg(rt));
+ MemoryWrite<float>(address2, sreg(rt2));
break;
}
case STP_x: {
- MemoryWrite64(address, xreg(rt));
- MemoryWrite64(address + kXRegSize, xreg(rt2));
+ DCHECK(access_size == kXRegSize);
+ MemoryWrite<uint64_t>(address, xreg(rt));
+ MemoryWrite<uint64_t>(address2, xreg(rt2));
break;
}
case STP_d: {
- MemoryWriteFP64(address, dreg(rt));
- MemoryWriteFP64(address + kDRegSize, dreg(rt2));
+ DCHECK(access_size == kDRegSize);
+ MemoryWrite<double>(address, dreg(rt));
+ MemoryWrite<double>(address2, dreg(rt2));
break;
}
default: UNREACHABLE();
}
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ if (instr->IsLoad()) {
+ if ((op == LDP_s) || (op == LDP_d)) {
+ LogReadFP(address, access_size, rt);
+ LogReadFP(address2, access_size, rt2);
+ } else {
+ LogRead(address, access_size, rt);
+ LogRead(address2, access_size, rt2);
+ }
+ } else {
+ if ((op == STP_s) || (op == STP_d)) {
+ LogWriteFP(address, access_size, rt);
+ LogWriteFP(address2, access_size, rt2);
+ } else {
+ LogWrite(address, access_size, rt);
+ LogWrite(address2, access_size, rt2);
+ }
+ }
+
// Handle the writeback for loads after the load to ensure safe pop
// operation even when interrupted in the middle of it. The stack pointer
// is only updated after the load so pop(fp) will never break the invariant
@@ -1645,7 +1783,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
if (instr->IsLoad()) {
// For loads the address pre writeback is used to check access below the
// stack.
- stack = reinterpret_cast<uint8_t*>(sp());
+ stack = sp();
LoadStoreWriteBack(addr_reg, offset, addrmode);
}
@@ -1657,24 +1795,37 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
void Simulator::VisitLoadLiteral(Instruction* instr) {
- uint8_t* address = instr->LiteralAddress();
+ uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
switch (instr->Mask(LoadLiteralMask)) {
- case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
- case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
- case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
- case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
+ // Use _no_log variants to suppress the register trace (LOG_REGS,
+ // LOG_FP_REGS), then print a more detailed log.
+ case LDR_w_lit:
+ set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+ LogRead(address, kWRegSize, rt);
+ break;
+ case LDR_x_lit:
+ set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+ LogRead(address, kXRegSize, rt);
+ break;
+ case LDR_s_lit:
+ set_sreg_no_log(rt, MemoryRead<float>(address));
+ LogReadFP(address, kSRegSize, rt);
+ break;
+ case LDR_d_lit:
+ set_dreg_no_log(rt, MemoryRead<double>(address));
+ LogReadFP(address, kDRegSize, rt);
+ break;
default: UNREACHABLE();
}
}
-uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode) {
+uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
+ AddrMode addrmode) {
const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
- int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
// When the base register is SP the stack pointer is required to be
// quadword aligned prior to the address calculation and write-backs.
@@ -1686,7 +1837,7 @@ uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
address += offset;
}
- return reinterpret_cast<uint8_t*>(address);
+ return address;
}
@@ -1701,88 +1852,21 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
}
-void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) {
fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
- fprintf(stream_, " sp is here: 0x%16p\n", stack);
- fprintf(stream_, " access was here: 0x%16p\n", address);
- fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
+ fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(stack));
+ fprintf(stream_, " access was here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(address));
+ fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n",
+ static_cast<uint64_t>(stack_limit_));
fprintf(stream_, "\n");
FATAL("ACCESS BELOW STACK POINTER");
}
}
-uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
- DCHECK(address != NULL);
- DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
- uint64_t read = 0;
- memcpy(&read, address, num_bytes);
- return read;
-}
-
-
-uint8_t Simulator::MemoryRead8(uint8_t* address) {
- return MemoryRead(address, sizeof(uint8_t));
-}
-
-
-uint16_t Simulator::MemoryRead16(uint8_t* address) {
- return MemoryRead(address, sizeof(uint16_t));
-}
-
-
-uint32_t Simulator::MemoryRead32(uint8_t* address) {
- return MemoryRead(address, sizeof(uint32_t));
-}
-
-
-float Simulator::MemoryReadFP32(uint8_t* address) {
- return rawbits_to_float(MemoryRead32(address));
-}
-
-
-uint64_t Simulator::MemoryRead64(uint8_t* address) {
- return MemoryRead(address, sizeof(uint64_t));
-}
-
-
-double Simulator::MemoryReadFP64(uint8_t* address) {
- return rawbits_to_double(MemoryRead64(address));
-}
-
-
-void Simulator::MemoryWrite(uint8_t* address,
- uint64_t value,
- unsigned num_bytes) {
- DCHECK(address != NULL);
- DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
-
- LogWrite(address, value, num_bytes);
- memcpy(address, &value, num_bytes);
-}
-
-
-void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
- MemoryWrite(address, value, sizeof(uint32_t));
-}
-
-
-void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
- MemoryWrite32(address, float_to_rawbits(value));
-}
-
-
-void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
- MemoryWrite(address, value, sizeof(uint64_t));
-}
-
-
-void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
- MemoryWrite64(address, double_to_rawbits(value));
-}
-
-
void Simulator::VisitMoveWideImmediate(Instruction* instr) {
MoveWideImmediateOp mov_op =
static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
@@ -2331,6 +2415,7 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) {
} else {
// If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
}
break;
}
@@ -3113,8 +3198,14 @@ void Simulator::VisitSystem(Instruction* instr) {
}
case MSR: {
switch (instr->ImmSystemRegister()) {
- case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
- case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+ case NZCV:
+ nzcv().SetRawValue(xreg(instr->Rt()));
+ LogSystemRegister(NZCV);
+ break;
+ case FPCR:
+ fpcr().SetRawValue(xreg(instr->Rt()));
+ LogSystemRegister(FPCR);
+ break;
default: UNIMPLEMENTED();
}
break;
@@ -3325,8 +3416,8 @@ void Simulator::Debug() {
} else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
if (argc == 2) {
if (strcmp(arg1, "all") == 0) {
- PrintRegisters(true);
- PrintFPRegisters(true);
+ PrintRegisters();
+ PrintFPRegisters();
} else {
if (!PrintValue(arg1)) {
PrintF("%s unrecognized\n", arg1);
@@ -3530,7 +3621,7 @@ void Simulator::VisitException(Instruction* instr) {
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
if (message != NULL) {
PrintF(stream_,
- "%sDebugger hit %d: %s%s%s\n",
+ "# %sDebugger hit %d: %s%s%s\n",
clr_debug_number,
code,
clr_debug_message,
@@ -3538,7 +3629,7 @@ void Simulator::VisitException(Instruction* instr) {
clr_normal);
} else {
PrintF(stream_,
- "%sDebugger hit %d.%s\n",
+ "# %sDebugger hit %d.%s\n",
clr_debug_number,
code,
clr_normal);
@@ -3565,9 +3656,9 @@ void Simulator::VisitException(Instruction* instr) {
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
- if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
- if (parameters & LOG_REGS) PrintRegisters(true);
- if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
+ if (parameters & LOG_REGS) PrintRegisters();
+ if (parameters & LOG_FP_REGS) PrintFPRegisters();
}
// The stop parameters are inlined in the code. Skip them:
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 6b0211816c..108f6f2b54 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -312,7 +312,6 @@ class Simulator : public DecoderVisitor {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
Decode(pc_);
- LogProcessorState();
increment_pc();
CheckBreakpoints();
}
@@ -348,16 +347,13 @@ class Simulator : public DecoderVisitor {
return reg<int64_t>(code, r31mode);
}
- // Write 'size' bits of 'value' into an integer register. The value is
- // zero-extended. This behaviour matches AArch64 register writes.
-
- // Like set_reg(), but infer the access size from the template type.
+ // Write 'value' into an integer register. The value is zero-extended. This
+ // behaviour matches AArch64 register writes.
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
- DCHECK(code < kNumberOfRegisters);
- if (!IsZeroRegister(code, r31mode))
- registers_[code].Set(value);
+ set_reg_no_log(code, value, r31mode);
+ LogRegister(code, r31mode);
}
// Common specialized accessors for the set_reg() template.
@@ -371,6 +367,26 @@ class Simulator : public DecoderVisitor {
set_reg(code, value, r31mode);
}
+ // As above, but don't automatically log the register update.
+ template <typename T>
+ void set_reg_no_log(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ DCHECK(code < kNumberOfRegisters);
+ if (!IsZeroRegister(code, r31mode)) {
+ registers_[code].Set(value);
+ }
+ }
+
+ void set_wreg_no_log(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg_no_log(code, value, r31mode);
+ }
+
+ void set_xreg_no_log(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg_no_log(code, value, r31mode);
+ }
+
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
@@ -430,9 +446,13 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
- DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
- DCHECK(code < kNumberOfFPRegisters);
- fpregisters_[code].Set(value);
+ set_fpreg_no_log(code, value);
+
+ if (sizeof(value) <= kSRegSize) {
+ LogFPRegister(code, kPrintSRegValue);
+ } else {
+ LogFPRegister(code, kPrintDRegValue);
+ }
}
// Common specialized accessors for the set_fpreg() template.
@@ -452,6 +472,22 @@ class Simulator : public DecoderVisitor {
set_fpreg(code, value);
}
+ // As above, but don't automatically log the register update.
+ template <typename T>
+ void set_fpreg_no_log(unsigned code, T value) {
+ DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+ DCHECK(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value);
+ }
+
+ void set_sreg_no_log(unsigned code, float value) {
+ set_fpreg_no_log(code, value);
+ }
+
+ void set_dreg_no_log(unsigned code, double value) {
+ set_fpreg_no_log(code, value);
+ }
+
SimSystemRegister& nzcv() { return nzcv_; }
SimSystemRegister& fpcr() { return fpcr_; }
@@ -478,27 +514,68 @@ class Simulator : public DecoderVisitor {
// Disassemble instruction at the given address.
void PrintInstructionsAt(Instruction* pc, uint64_t count);
- void PrintSystemRegisters(bool print_all = false);
- void PrintRegisters(bool print_all_regs = false);
- void PrintFPRegisters(bool print_all_regs = false);
- void PrintProcessorState();
- void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ // Print all registers of the specified types.
+ void PrintRegisters();
+ void PrintFPRegisters();
+ void PrintSystemRegisters();
+
+ // Like Print* (above), but respect log_parameters().
void LogSystemRegisters() {
- if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ if (log_parameters() & LOG_SYS_REGS) PrintSystemRegisters();
}
void LogRegisters() {
- if (log_parameters_ & LOG_REGS) PrintRegisters();
+ if (log_parameters() & LOG_REGS) PrintRegisters();
}
void LogFPRegisters() {
- if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ if (log_parameters() & LOG_FP_REGS) PrintFPRegisters();
+ }
+
+ // Specify relevant register sizes, for PrintFPRegister.
+ //
+ // These values are bit masks; they can be combined in case multiple views of
+ // a machine register are interesting.
+ enum PrintFPRegisterSizes {
+ kPrintDRegValue = 1 << kDRegSize,
+ kPrintSRegValue = 1 << kSRegSize,
+ kPrintAllFPRegValues = kPrintDRegValue | kPrintSRegValue
+ };
+
+ // Print individual register values (after update).
+ void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+ void PrintFPRegister(unsigned code,
+ PrintFPRegisterSizes sizes = kPrintAllFPRegValues);
+ void PrintSystemRegister(SystemRegister id);
+
+ // Like Print* (above), but respect log_parameters().
+ void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+ if (log_parameters() & LOG_REGS) PrintRegister(code, r31mode);
}
- void LogProcessorState() {
- LogSystemRegisters();
- LogRegisters();
- LogFPRegisters();
+ void LogFPRegister(unsigned code,
+ PrintFPRegisterSizes sizes = kPrintAllFPRegValues) {
+ if (log_parameters() & LOG_FP_REGS) PrintFPRegister(code, sizes);
}
- void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
- if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ void LogSystemRegister(SystemRegister id) {
+ if (log_parameters() & LOG_SYS_REGS) PrintSystemRegister(id);
+ }
+
+ // Print memory accesses.
+ void PrintRead(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintReadFP(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintWrite(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintWriteFP(uintptr_t address, size_t size, unsigned reg_code);
+
+ // Like Print* (above), but respect log_parameters().
+ void LogRead(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_REGS) PrintRead(address, size, reg_code);
+ }
+ void LogReadFP(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_FP_REGS) PrintReadFP(address, size, reg_code);
+ }
+ void LogWrite(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_WRITE) PrintWrite(address, size, reg_code);
+ }
+ void LogWriteFP(uintptr_t address, size_t size, unsigned reg_code) {
+ if (log_parameters() & LOG_WRITE) PrintWriteFP(address, size, reg_code);
}
int log_parameters() { return log_parameters_; }
@@ -589,28 +666,30 @@ class Simulator : public DecoderVisitor {
int64_t offset,
AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
- uint8_t* LoadStoreAddress(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode);
+ uintptr_t LoadStoreAddress(unsigned addr_reg, int64_t offset,
+ AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
- void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
-
- uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
- uint8_t MemoryRead8(uint8_t* address);
- uint16_t MemoryRead16(uint8_t* address);
- uint32_t MemoryRead32(uint8_t* address);
- float MemoryReadFP32(uint8_t* address);
- uint64_t MemoryRead64(uint8_t* address);
- double MemoryReadFP64(uint8_t* address);
+ void CheckMemoryAccess(uintptr_t address, uintptr_t stack);
- void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
- void MemoryWrite32(uint8_t* address, uint32_t value);
- void MemoryWriteFP32(uint8_t* address, float value);
- void MemoryWrite64(uint8_t* address, uint64_t value);
- void MemoryWriteFP64(uint8_t* address, double value);
+ // Memory read helpers.
+ template <typename T, typename A>
+ T MemoryRead(A address) {
+ T value;
+ STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
+ return value;
+ }
+ // Memory write helpers.
+ template <typename T, typename A>
+ void MemoryWrite(A address, T value) {
+ STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ memcpy(reinterpret_cast<void*>(address), &value, sizeof(value));
+ }
template <typename T>
T ShiftOperand(T value,
@@ -763,10 +842,10 @@ class Simulator : public DecoderVisitor {
static const uint32_t kConditionFlagsMask = 0xf0000000;
// Stack
- byte* stack_;
- static const intptr_t stack_protection_size_ = KB;
- intptr_t stack_size_;
- byte* stack_limit_;
+ uintptr_t stack_;
+ static const size_t stack_protection_size_ = KB;
+ size_t stack_size_;
+ uintptr_t stack_limit_;
Decoder<DispatchingDecoderVisitor>* decoder_;
Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index f04d6c974a..82779bc228 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -50,7 +50,7 @@ function ArrayIteratorIterator() {
function ArrayIteratorNext() {
var iterator = ToObject(this);
- if (!HAS_PRIVATE(iterator, arrayIteratorObjectSymbol)) {
+ if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
}
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index cf99aceb69..44deff7de4 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -144,7 +144,7 @@ function Join(array, length, separator, convert) {
elements[elements_length++] = e;
}
elements.length = elements_length;
- var result = %_FastAsciiArrayJoin(elements, '');
+ var result = %_FastOneByteArrayJoin(elements, '');
if (!IS_UNDEFINED(result)) return result;
return %StringBuilderConcat(elements, elements_length, '');
}
@@ -168,7 +168,7 @@ function Join(array, length, separator, convert) {
elements[i] = e;
}
}
- var result = %_FastAsciiArrayJoin(elements, separator);
+ var result = %_FastOneByteArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
return %StringBuilderJoin(elements, length, separator);
@@ -375,7 +375,7 @@ function ArrayJoin(separator) {
separator = NonStringToString(separator);
}
- var result = %_FastAsciiArrayJoin(array, separator);
+ var result = %_FastOneByteArrayJoin(array, separator);
if (!IS_UNDEFINED(result)) return result;
return Join(array, length, separator, ConvertToString);
@@ -863,11 +863,12 @@ function ArraySort(comparefn) {
var t_array = [];
// Use both 'from' and 'to' to determine the pivot candidates.
var increment = 200 + ((to - from) & 15);
- for (var i = from + 1; i < to - 1; i += increment) {
- t_array.push([i, a[i]]);
+ for (var i = from + 1, j = 0; i < to - 1; i += increment, j++) {
+ t_array[j] = [i, a[i]];
}
- t_array.sort(function(a, b) {
- return %_CallFunction(receiver, a[1], b[1], comparefn) } );
+ %_CallFunction(t_array, function(a, b) {
+ return %_CallFunction(receiver, a[1], b[1], comparefn);
+ }, ArraySort);
var third_index = t_array[t_array.length >> 1][0];
return third_index;
}
@@ -969,7 +970,7 @@ function ArraySort(comparefn) {
// It's an interval.
var proto_length = indices;
for (var i = 0; i < proto_length; i++) {
- if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
+ if (!HAS_OWN_PROPERTY(obj, i) && HAS_OWN_PROPERTY(proto, i)) {
obj[i] = proto[i];
if (i >= max) { max = i + 1; }
}
@@ -977,8 +978,8 @@ function ArraySort(comparefn) {
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
- if (!IS_UNDEFINED(index) &&
- !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
+ if (!IS_UNDEFINED(index) && !HAS_OWN_PROPERTY(obj, index)
+ && HAS_OWN_PROPERTY(proto, index)) {
obj[index] = proto[index];
if (index >= max) { max = index + 1; }
}
@@ -998,7 +999,7 @@ function ArraySort(comparefn) {
// It's an interval.
var proto_length = indices;
for (var i = from; i < proto_length; i++) {
- if (proto.hasOwnProperty(i)) {
+ if (HAS_OWN_PROPERTY(proto, i)) {
obj[i] = UNDEFINED;
}
}
@@ -1006,7 +1007,7 @@ function ArraySort(comparefn) {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
- proto.hasOwnProperty(index)) {
+ HAS_OWN_PROPERTY(proto, index)) {
obj[index] = UNDEFINED;
}
}
@@ -1029,14 +1030,14 @@ function ArraySort(comparefn) {
}
// Maintain the invariant num_holes = the number of holes in the original
// array with indices <= first_undefined or > last_defined.
- if (!obj.hasOwnProperty(first_undefined)) {
+ if (!HAS_OWN_PROPERTY(obj, first_undefined)) {
num_holes++;
}
// Find last defined element.
while (first_undefined < last_defined &&
IS_UNDEFINED(obj[last_defined])) {
- if (!obj.hasOwnProperty(last_defined)) {
+ if (!HAS_OWN_PROPERTY(obj, last_defined)) {
num_holes++;
}
last_defined--;
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index e6dc4eb14e..0d868aa641 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -40,19 +40,20 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/builtins.h"
+#include "src/codegen.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
-#include "src/ic.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "src/token.h"
#if V8_TARGET_ARCH_IA32
@@ -882,7 +883,7 @@ void ExternalReference::SetUp() {
double_constants.one_half = 0.5;
double_constants.minus_one_half = -0.5;
double_constants.canonical_non_hole_nan = base::OS::nan_value();
- double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
+ double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
@@ -923,9 +924,9 @@ void ExternalReference::InitializeMathExpData() {
math_exp_log_table_array = new double[kTableSize];
for (int i = 0; i < kTableSize; i++) {
double value = std::pow(2, i / kTableSizeDouble);
- uint64_t bits = BitCast<uint64_t, double>(value);
+ uint64_t bits = bit_cast<uint64_t, double>(value);
bits &= (static_cast<uint64_t>(1) << 52) - 1;
- double mantissa = BitCast<double, uint64_t>(bits);
+ double mantissa = bit_cast<double, uint64_t>(bits);
math_exp_log_table_array[i] = mantissa;
}
@@ -936,8 +937,11 @@ void ExternalReference::InitializeMathExpData() {
void ExternalReference::TearDownMathExpData() {
delete[] math_exp_constants_array;
+ math_exp_constants_array = NULL;
delete[] math_exp_log_table_array;
+ math_exp_log_table_array = NULL;
delete math_exp_data_mutex;
+ math_exp_data_mutex = NULL;
}
@@ -1567,38 +1571,4 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written;
}
-
-MultiplierAndShift::MultiplierAndShift(int32_t d) {
- DCHECK(d <= -2 || 2 <= d);
- const uint32_t two31 = 0x80000000;
- uint32_t ad = Abs(d);
- uint32_t t = two31 + (uint32_t(d) >> 31);
- uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
- int32_t p = 31; // Init. p.
- uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
- uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
- uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
- uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
- uint32_t delta;
- do {
- p++;
- q1 *= 2; // Update q1 = 2**p/|nc|.
- r1 *= 2; // Update r1 = rem(2**p, |nc|).
- if (r1 >= anc) { // Must be an unsigned comparison here.
- q1++;
- r1 = r1 - anc;
- }
- q2 *= 2; // Update q2 = 2**p/|d|.
- r2 *= 2; // Update r2 = rem(2**p, |d|).
- if (r2 >= ad) { // Must be an unsigned comparison here.
- q2++;
- r2 = r2 - ad;
- }
- delta = ad - r2;
- } while (q1 < delta || (q1 == delta && r1 == 0));
- int32_t mul = static_cast<int32_t>(q2 + 1);
- multiplier_ = (d < 0) ? -mul : mul;
- shift_ = p - 32;
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index a128c09d09..37e82ca35c 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -41,7 +41,7 @@
#include "src/builtins.h"
#include "src/gdb-jit.h"
#include "src/isolate.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/token.h"
namespace v8 {
@@ -459,9 +459,7 @@ class RelocInfo {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
double data64() const { return data64_; }
- uint64_t raw_data64() {
- return BitCast<uint64_t>(data64_);
- }
+ uint64_t raw_data64() { return bit_cast<uint64_t>(data64_); }
Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; }
@@ -774,12 +772,12 @@ class ExternalReference BASE_EMBEDDED {
PROFILING_API_CALL,
// Direct call to accessor getter callback.
- // void f(Local<String> property, PropertyCallbackInfo& info)
+ // void f(Local<Name> property, PropertyCallbackInfo& info)
DIRECT_GETTER_CALL,
// Call to accessor getter callback via InvokeAccessorGetterCallback.
- // void f(Local<String> property, PropertyCallbackInfo& info,
- // AccessorGetterCallback callback)
+ // void f(Local<Name> property, PropertyCallbackInfo& info,
+ // AccessorNameGetterCallback callback)
PROFILING_GETTER_CALL
};
@@ -1110,20 +1108,6 @@ class NullCallWrapper : public CallWrapper {
};
-// The multiplier and shift for signed division via multiplication, see Warren's
-// "Hacker's Delight", chapter 10.
-class MultiplierAndShift {
- public:
- explicit MultiplierAndShift(int32_t d);
- int32_t multiplier() const { return multiplier_; }
- int32_t shift() const { return shift_; }
-
- private:
- int32_t multiplier_;
- int32_t shift_;
-};
-
-
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index c4aa9877d4..4c10fddb91 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -2,20 +2,154 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#include "src/assert-scope.h"
-#include "src/v8.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/platform.h"
+#include "src/isolate-inl.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
-uint32_t PerIsolateAssertBase::GetData(Isolate* isolate) {
- return isolate->per_isolate_assert_data();
+namespace {
+
+struct PerThreadAssertKeyConstructTrait FINAL {
+ static void Construct(base::Thread::LocalStorageKey* key) {
+ *key = base::Thread::CreateThreadLocalKey();
+ }
+};
+
+
+typedef base::LazyStaticInstance<base::Thread::LocalStorageKey,
+ PerThreadAssertKeyConstructTrait>::type
+ PerThreadAssertKey;
+
+
+PerThreadAssertKey kPerThreadAssertKey;
+
+} // namespace
+
+
+class PerThreadAssertData FINAL {
+ public:
+ PerThreadAssertData() : nesting_level_(0) {
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
+ assert_states_[i] = true;
+ }
+ }
+
+ ~PerThreadAssertData() {
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; ++i) {
+ DCHECK(assert_states_[i]);
+ }
+ }
+
+ bool Get(PerThreadAssertType type) const { return assert_states_[type]; }
+ void Set(PerThreadAssertType type, bool x) { assert_states_[type] = x; }
+
+ void IncrementLevel() { ++nesting_level_; }
+ bool DecrementLevel() { return --nesting_level_ == 0; }
+
+ static PerThreadAssertData* GetCurrent() {
+ return reinterpret_cast<PerThreadAssertData*>(
+ base::Thread::GetThreadLocal(kPerThreadAssertKey.Get()));
+ }
+ static void SetCurrent(PerThreadAssertData* data) {
+ base::Thread::SetThreadLocal(kPerThreadAssertKey.Get(), data);
+ }
+
+ private:
+ bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
+ int nesting_level_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
+};
+
+
+template <PerThreadAssertType kType, bool kAllow>
+PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
+ : data_(PerThreadAssertData::GetCurrent()) {
+ if (data_ == NULL) {
+ data_ = new PerThreadAssertData();
+ PerThreadAssertData::SetCurrent(data_);
+ }
+ data_->IncrementLevel();
+ old_state_ = data_->Get(kType);
+ data_->Set(kType, kAllow);
}
-void PerIsolateAssertBase::SetData(Isolate* isolate, uint32_t data) {
- isolate->set_per_isolate_assert_data(data);
+template <PerThreadAssertType kType, bool kAllow>
+PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
+ DCHECK_NOT_NULL(data_);
+ data_->Set(kType, old_state_);
+ if (data_->DecrementLevel()) {
+ PerThreadAssertData::SetCurrent(NULL);
+ delete data_;
+ }
}
-} } // namespace v8::internal
+
+// static
+template <PerThreadAssertType kType, bool kAllow>
+bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
+ PerThreadAssertData* data = PerThreadAssertData::GetCurrent();
+ return data == NULL || data->Get(kType);
+}
+
+
+template <PerIsolateAssertType kType, bool kAllow>
+class PerIsolateAssertScope<kType, kAllow>::DataBit
+ : public BitField<bool, kType, 1> {};
+
+
+template <PerIsolateAssertType kType, bool kAllow>
+PerIsolateAssertScope<kType, kAllow>::PerIsolateAssertScope(Isolate* isolate)
+ : isolate_(isolate), old_data_(isolate->per_isolate_assert_data()) {
+ DCHECK_NOT_NULL(isolate);
+ STATIC_ASSERT(kType < 32);
+ isolate_->set_per_isolate_assert_data(DataBit::update(old_data_, kAllow));
+}
+
+
+template <PerIsolateAssertType kType, bool kAllow>
+PerIsolateAssertScope<kType, kAllow>::~PerIsolateAssertScope() {
+ isolate_->set_per_isolate_assert_data(old_data_);
+}
+
+
+// static
+template <PerIsolateAssertType kType, bool kAllow>
+bool PerIsolateAssertScope<kType, kAllow>::IsAllowed(Isolate* isolate) {
+ return DataBit::decode(isolate->per_isolate_assert_data());
+}
+
+
+// -----------------------------------------------------------------------------
+// Instantiations.
+
+template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>;
+template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>;
+template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>;
+template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>;
+template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
+template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
+template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>;
+template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>;
+template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
+template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+
+template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
+template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
+template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>;
+template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>;
+template class PerIsolateAssertScope<ALLOCATION_FAILURE_ASSERT, false>;
+template class PerIsolateAssertScope<ALLOCATION_FAILURE_ASSERT, true>;
+template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
+template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
+template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
+template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 7cfec567ba..41baa65563 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -5,14 +5,16 @@
#ifndef V8_ASSERT_SCOPE_H_
#define V8_ASSERT_SCOPE_H_
-#include "src/allocation.h"
-#include "src/base/platform/platform.h"
-#include "src/utils.h"
+#include "include/v8stdint.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
+// Forward declarations.
class Isolate;
+class PerThreadAssertData;
+
enum PerThreadAssertType {
HEAP_ALLOCATION_ASSERT,
@@ -33,120 +35,35 @@ enum PerIsolateAssertType {
};
-class PerThreadAssertData {
+template <PerThreadAssertType kType, bool kAllow>
+class PerThreadAssertScope {
public:
- PerThreadAssertData() : nesting_level_(0) {
- for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
- assert_states_[i] = true;
- }
- }
-
- void set(PerThreadAssertType type, bool allow) {
- assert_states_[type] = allow;
- }
-
- bool get(PerThreadAssertType type) const {
- return assert_states_[type];
- }
+ PerThreadAssertScope();
+ ~PerThreadAssertScope();
- void increment_level() { ++nesting_level_; }
- bool decrement_level() { return --nesting_level_ == 0; }
+ static bool IsAllowed();
private:
- bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
- int nesting_level_;
-
- DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
-};
-
-
-class PerThreadAssertScopeBase {
- protected:
- PerThreadAssertScopeBase() {
- data_ = GetAssertData();
- if (data_ == NULL) {
- data_ = new PerThreadAssertData();
- SetThreadLocalData(data_);
- }
- data_->increment_level();
- }
-
- ~PerThreadAssertScopeBase() {
- if (!data_->decrement_level()) return;
- for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
- DCHECK(data_->get(static_cast<PerThreadAssertType>(i)));
- }
- delete data_;
- SetThreadLocalData(NULL);
- }
-
- static PerThreadAssertData* GetAssertData() {
- return reinterpret_cast<PerThreadAssertData*>(
- base::Thread::GetThreadLocal(thread_local_key));
- }
-
- static base::Thread::LocalStorageKey thread_local_key;
PerThreadAssertData* data_;
- friend class Isolate;
-
- private:
- static void SetThreadLocalData(PerThreadAssertData* data) {
- base::Thread::SetThreadLocal(thread_local_key, data);
- }
-};
-
-
-template <PerThreadAssertType type, bool allow>
-class PerThreadAssertScope : public PerThreadAssertScopeBase {
- public:
- PerThreadAssertScope() {
- old_state_ = data_->get(type);
- data_->set(type, allow);
- }
-
- ~PerThreadAssertScope() { data_->set(type, old_state_); }
-
- static bool IsAllowed() {
- PerThreadAssertData* data = GetAssertData();
- return data == NULL || data->get(type);
- }
-
- private:
bool old_state_;
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
};
-class PerIsolateAssertBase {
- protected:
- static uint32_t GetData(Isolate* isolate);
- static void SetData(Isolate* isolate, uint32_t data);
-};
-
-
template <PerIsolateAssertType type, bool allow>
-class PerIsolateAssertScope : public PerIsolateAssertBase {
+class PerIsolateAssertScope {
public:
- explicit PerIsolateAssertScope(Isolate* isolate) : isolate_(isolate) {
- STATIC_ASSERT(type < 32);
- old_data_ = GetData(isolate_);
- SetData(isolate_, DataBit::update(old_data_, allow));
- }
+ explicit PerIsolateAssertScope(Isolate* isolate);
+ ~PerIsolateAssertScope();
- ~PerIsolateAssertScope() {
- SetData(isolate_, old_data_);
- }
-
- static bool IsAllowed(Isolate* isolate) {
- return DataBit::decode(GetData(isolate));
- }
+ static bool IsAllowed(Isolate* isolate);
private:
- typedef BitField<bool, type, 1> DataBit;
+ class DataBit;
- uint32_t old_data_;
Isolate* isolate_;
+ uint32_t old_data_;
DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
};
diff --git a/deps/v8/src/ast-value-factory.cc b/deps/v8/src/ast-value-factory.cc
index dcfd289091..ea8474ff52 100644
--- a/deps/v8/src/ast-value-factory.cc
+++ b/deps/v8/src/ast-value-factory.cc
@@ -56,22 +56,22 @@ class AstRawStringInternalizationKey : public HashTableKey {
explicit AstRawStringInternalizationKey(const AstRawString* string)
: string_(string) {}
- virtual bool IsMatch(Object* other) V8_OVERRIDE {
+ virtual bool IsMatch(Object* other) OVERRIDE {
if (string_->is_one_byte_)
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo(
Vector<const uint16_t>::cast(string_->literal_bytes_));
}
- virtual uint32_t Hash() V8_OVERRIDE {
+ virtual uint32_t Hash() OVERRIDE {
return string_->hash() >> Name::kHashShift;
}
- virtual uint32_t HashForObject(Object* key) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* key) OVERRIDE {
return String::cast(key)->Hash();
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
if (string_->is_one_byte_)
return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash());
@@ -249,7 +249,7 @@ const AstRawString* AstValueFactory::GetTwoByteString(
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent();
- if (content.IsAscii()) {
+ if (content.IsOneByte()) {
return GetOneByteString(content.ToOneByteVector());
}
DCHECK(content.IsTwoByte());
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index c3bf24c2d6..2f84163ece 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -64,13 +64,13 @@ class AstString : public ZoneObject {
class AstRawString : public AstString {
public:
- virtual int length() const V8_OVERRIDE {
+ virtual int length() const OVERRIDE {
if (is_one_byte_)
return literal_bytes_.length();
return literal_bytes_.length() / 2;
}
- virtual void Internalize(Isolate* isolate) V8_OVERRIDE;
+ virtual void Internalize(Isolate* isolate) OVERRIDE;
bool AsArrayIndex(uint32_t* index) const;
@@ -120,11 +120,11 @@ class AstConsString : public AstString {
: left_(left),
right_(right) {}
- virtual int length() const V8_OVERRIDE {
+ virtual int length() const OVERRIDE {
return left_->length() + right_->length();
}
- virtual void Internalize(Isolate* isolate) V8_OVERRIDE;
+ virtual void Internalize(Isolate* isolate) OVERRIDE;
private:
friend class AstValueFactory;
@@ -235,32 +235,33 @@ class AstValue : public ZoneObject {
// For generating string constants.
-#define STRING_CONSTANTS(F) \
- F(anonymous_function, "(anonymous function)") \
- F(arguments, "arguments") \
- F(done, "done") \
- F(dot, ".") \
- F(dot_for, ".for") \
- F(dot_generator, ".generator") \
- F(dot_generator_object, ".generator_object") \
- F(dot_iterator, ".iterator") \
- F(dot_module, ".module") \
- F(dot_result, ".result") \
- F(empty, "") \
- F(eval, "eval") \
+#define STRING_CONSTANTS(F) \
+ F(anonymous_function, "(anonymous function)") \
+ F(arguments, "arguments") \
+ F(constructor, "constructor") \
+ F(done, "done") \
+ F(dot, ".") \
+ F(dot_for, ".for") \
+ F(dot_generator, ".generator") \
+ F(dot_generator_object, ".generator_object") \
+ F(dot_iterator, ".iterator") \
+ F(dot_module, ".module") \
+ F(dot_result, ".result") \
+ F(empty, "") \
+ F(eval, "eval") \
F(initialize_const_global, "initializeConstGlobal") \
- F(initialize_var_global, "initializeVarGlobal") \
- F(make_reference_error, "MakeReferenceError") \
- F(make_syntax_error, "MakeSyntaxError") \
- F(make_type_error, "MakeTypeError") \
- F(module, "module") \
- F(native, "native") \
- F(next, "next") \
- F(proto, "__proto__") \
- F(prototype, "prototype") \
- F(this, "this") \
- F(use_asm, "use asm") \
- F(use_strict, "use strict") \
+ F(initialize_var_global, "initializeVarGlobal") \
+ F(make_reference_error, "MakeReferenceError") \
+ F(make_syntax_error, "MakeSyntaxError") \
+ F(make_type_error, "MakeTypeError") \
+ F(module, "module") \
+ F(native, "native") \
+ F(next, "next") \
+ F(proto, "__proto__") \
+ F(prototype, "prototype") \
+ F(this, "this") \
+ F(use_asm, "use asm") \
+ F(use_strict, "use strict") \
F(value, "value")
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 9f431604b7..a7d9bad384 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -59,8 +59,9 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
}
-VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
- : Expression(zone, position),
+VariableProxy::VariableProxy(Zone* zone, Variable* var, int position,
+ IdGen* id_gen)
+ : Expression(zone, position, id_gen),
name_(var->raw_name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
@@ -71,19 +72,15 @@ VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
}
-VariableProxy::VariableProxy(Zone* zone,
- const AstRawString* name,
- bool is_this,
- Interface* interface,
- int position)
- : Expression(zone, position),
+VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
+ Interface* interface, int position, IdGen* id_gen)
+ : Expression(zone, position, id_gen),
name_(name),
var_(NULL),
is_this_(is_this),
is_assigned_(false),
interface_(interface),
- variable_feedback_slot_(kInvalidFeedbackSlot) {
-}
+ variable_feedback_slot_(kInvalidFeedbackSlot) {}
void VariableProxy::BindTo(Variable* var) {
@@ -101,19 +98,16 @@ void VariableProxy::BindTo(Variable* var) {
}
-Assignment::Assignment(Zone* zone,
- Token::Value op,
- Expression* target,
- Expression* value,
- int pos)
- : Expression(zone, pos),
+Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
+ Expression* value, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
op_(op),
target_(target),
value_(value),
binary_operation_(NULL),
- assignment_id_(GetNextId(zone)),
+ assignment_id_(id_gen->GetNextId()),
is_uninitialized_(false),
- store_mode_(STANDARD_STORE) { }
+ store_mode_(STANDARD_STORE) {}
Token::Value Assignment::binary_op() const {
@@ -179,10 +173,12 @@ void FunctionLiteral::InitializeSharedInfo(
ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
AstValueFactory* ast_value_factory,
- Literal* key, Expression* value) {
+ Literal* key, Expression* value,
+ bool is_static) {
emit_store_ = true;
key_ = key;
value_ = value;
+ is_static_ = is_static;
if (key->raw_value()->EqualsString(ast_value_factory->proto_string())) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
@@ -195,11 +191,13 @@ ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
}
-ObjectLiteralProperty::ObjectLiteralProperty(
- Zone* zone, bool is_getter, FunctionLiteral* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone, bool is_getter,
+ FunctionLiteral* value,
+ bool is_static) {
emit_store_ = true;
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
+ is_static_ = is_static;
}
@@ -590,18 +588,16 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
- LookupResult* lookup) {
+ LookupIterator* it) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<Cell>::null();
- DCHECK(lookup->IsFound() &&
- lookup->type() == NORMAL &&
- lookup->holder() == *global);
- cell_ = Handle<Cell>(global->GetPropertyCell(lookup));
+ DCHECK(it->IsFound() && it->GetHolder<JSObject>().is_identical_to(global));
+ cell_ = it->GetPropertyCell();
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!lookup->isolate()->heap()->InNewSpace(*candidate)) {
+ if (!it->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
@@ -619,9 +615,6 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
- if (!allocation_site_.is_null()) {
- elements_kind_ = allocation_site_->GetElementsKind();
- }
}
}
@@ -799,12 +792,12 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look as correct ones which is likely if the input and
// output formats are alike.
-class RegExpUnparser V8_FINAL : public RegExpVisitor {
+class RegExpUnparser FINAL : public RegExpVisitor {
public:
RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {}
void VisitCharacterRange(CharacterRange that);
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
- void* data) V8_OVERRIDE;
+ void* data) OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
@@ -995,58 +988,62 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
-CaseClause::CaseClause(Zone* zone,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos)
- : Expression(zone, pos),
+CaseClause::CaseClause(Zone* zone, Expression* label,
+ ZoneList<Statement*>* statements, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
label_(label),
statements_(statements),
compare_type_(Type::None(zone)),
- compare_id_(AstNode::GetNextId(zone)),
- entry_id_(AstNode::GetNextId(zone)) {
-}
+ compare_id_(id_gen->GetNextId()),
+ entry_id_(id_gen->GetNextId()) {}
-#define REGULAR_NODE(NodeType) \
+#define REGULAR_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
+ increase_node_count(); \
}
-#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
+ increase_node_count(); \
+ add_slot_node(node); \
}
-#define DONT_OPTIMIZE_NODE(NodeType) \
+#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- set_dont_optimize_reason(k##NodeType); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
}
-#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
- set_dont_optimize_reason(k##NodeType); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ add_slot_node(node); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
+ }
+#define DONT_TURBOFAN_NODE(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ set_dont_turbofan_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ add_flag(kDontSelfOptimize); \
}
-#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
- add_flag(kDontSelfOptimize); \
+ increase_node_count(); \
+ add_slot_node(node); \
+ add_flag(kDontSelfOptimize); \
}
-#define DONT_CACHE_NODE(NodeType) \
+#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- set_dont_optimize_reason(k##NodeType); \
- add_flag(kDontSelfOptimize); \
- add_flag(kDontCache); \
+ increase_node_count(); \
+ set_dont_crankshaft_reason(k##NodeType); \
+ add_flag(kDontSelfOptimize); \
+ add_flag(kDontCache); \
}
REGULAR_NODE(VariableDeclaration)
@@ -1093,17 +1090,21 @@ DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
-DONT_OPTIMIZE_NODE(TryCatchStatement)
-DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
+DONT_OPTIMIZE_NODE(ClassLiteral)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
+DONT_OPTIMIZE_NODE(SuperReference)
DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
+// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
+DONT_TURBOFAN_NODE(ForOfStatement)
+DONT_TURBOFAN_NODE(TryCatchStatement)
+DONT_TURBOFAN_NODE(TryFinallyStatement)
+
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
@@ -1115,7 +1116,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
add_slot_node(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
- set_dont_optimize_reason(kCallToAJavaScriptRuntimeFunction);
+ set_dont_crankshaft_reason(kCallToAJavaScriptRuntimeFunction);
}
}
@@ -1129,7 +1130,7 @@ Handle<String> Literal::ToString() {
if (value_->IsString()) return value_->AsString()->string();
DCHECK(value_->IsNumber());
char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ Vector<char> buffer(arr, arraysize(arr));
const char* str;
if (value()->IsSmi()) {
// Optimization only, the heap number case would subsume this.
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index e18fdc79c7..63055ea356 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -9,13 +9,14 @@
#include "src/assembler.h"
#include "src/ast-value-factory.h"
+#include "src/bailout-reason.h"
#include "src/factory.h"
#include "src/feedback-slots.h"
#include "src/interface.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/list-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
#include "src/smart-pointers.h"
#include "src/token.h"
@@ -40,12 +41,12 @@ namespace internal {
// Nodes of the abstract syntax tree. Only concrete classes are
// enumerated here.
-#define DECLARATION_NODE_LIST(V) \
- V(VariableDeclaration) \
- V(FunctionDeclaration) \
- V(ModuleDeclaration) \
- V(ImportDeclaration) \
- V(ExportDeclaration) \
+#define DECLARATION_NODE_LIST(V) \
+ V(VariableDeclaration) \
+ V(FunctionDeclaration) \
+ V(ModuleDeclaration) \
+ V(ImportDeclaration) \
+ V(ExportDeclaration)
#define MODULE_NODE_LIST(V) \
V(ModuleLiteral) \
@@ -73,27 +74,29 @@ namespace internal {
V(TryFinallyStatement) \
V(DebuggerStatement)
-#define EXPRESSION_NODE_LIST(V) \
- V(FunctionLiteral) \
- V(NativeFunctionLiteral) \
- V(Conditional) \
- V(VariableProxy) \
- V(Literal) \
- V(RegExpLiteral) \
- V(ObjectLiteral) \
- V(ArrayLiteral) \
- V(Assignment) \
- V(Yield) \
- V(Throw) \
- V(Property) \
- V(Call) \
- V(CallNew) \
- V(CallRuntime) \
- V(UnaryOperation) \
- V(CountOperation) \
- V(BinaryOperation) \
- V(CompareOperation) \
- V(ThisFunction) \
+#define EXPRESSION_NODE_LIST(V) \
+ V(FunctionLiteral) \
+ V(ClassLiteral) \
+ V(NativeFunctionLiteral) \
+ V(Conditional) \
+ V(VariableProxy) \
+ V(Literal) \
+ V(RegExpLiteral) \
+ V(ObjectLiteral) \
+ V(ArrayLiteral) \
+ V(Assignment) \
+ V(Yield) \
+ V(Throw) \
+ V(Property) \
+ V(Call) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(UnaryOperation) \
+ V(CountOperation) \
+ V(BinaryOperation) \
+ V(CompareOperation) \
+ V(ThisFunction) \
+ V(SuperReference) \
V(CaseClause)
#define AST_NODE_LIST(V) \
@@ -142,8 +145,8 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v) V8_OVERRIDE; \
- virtual AstNode::NodeType node_type() const V8_FINAL V8_OVERRIDE { \
+ virtual void Accept(AstVisitor* v) OVERRIDE; \
+ virtual AstNode::NodeType node_type() const FINAL OVERRIDE { \
return AstNode::k##type; \
} \
template<class> friend class AstNodeFactory;
@@ -156,7 +159,7 @@ enum AstPropertiesFlag {
};
-class AstProperties V8_FINAL BASE_EMBEDDED {
+class AstProperties FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
@@ -180,6 +183,22 @@ AstProperties() : node_count_(0), feedback_slots_(0) {}
class AstNode: public ZoneObject {
public:
+ // For generating IDs for AstNodes.
+ class IdGen {
+ public:
+ explicit IdGen(int id = 0) : id_(id) {}
+
+ int GetNextId() { return ReserveIdRange(1); }
+ int ReserveIdRange(int n) {
+ int tmp = id_;
+ id_ += n;
+ return tmp;
+ }
+
+ private:
+ int id_;
+ };
+
#define DECLARE_TYPE_ENUM(type) k##type,
enum NodeType {
AST_NODE_LIST(DECLARE_TYPE_ENUM)
@@ -216,16 +235,6 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
protected:
- static int GetNextId(Zone* zone) {
- return ReserveIdRange(zone, 1);
- }
-
- static int ReserveIdRange(Zone* zone, int n) {
- int tmp = zone->isolate()->ast_node_id();
- zone->isolate()->set_ast_node_id(tmp + n);
- return tmp;
- }
-
// Some nodes re-use bailout IDs for type feedback.
static TypeFeedbackId reuse(BailoutId id) {
return TypeFeedbackId(id.ToInt());
@@ -252,7 +261,7 @@ class Statement : public AstNode {
};
-class SmallMapList V8_FINAL {
+class SmallMapList FINAL {
public:
SmallMapList() {}
SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
@@ -371,17 +380,14 @@ class Expression : public AstNode {
TypeFeedbackId test_id() const { return test_id_; }
protected:
- Expression(Zone* zone, int pos)
+ Expression(Zone* zone, int pos, IdGen* id_gen)
: AstNode(pos),
- zone_(zone),
bounds_(Bounds::Unbounded(zone)),
parenthesization_level_(0),
- id_(GetNextId(zone)),
- test_id_(GetNextId(zone)) {}
+ id_(id_gen->GetNextId()),
+ test_id_(id_gen->GetNextId()) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
- Zone* zone_;
-
private:
Bounds bounds_;
byte to_boolean_types_;
@@ -404,7 +410,7 @@ class BreakableStatement : public Statement {
ZoneList<const AstRawString*>* labels() const { return labels_; }
// Type testing & conversion.
- virtual BreakableStatement* AsBreakableStatement() V8_FINAL V8_OVERRIDE {
+ virtual BreakableStatement* AsBreakableStatement() FINAL OVERRIDE {
return this;
}
@@ -420,14 +426,13 @@ class BreakableStatement : public Statement {
BailoutId ExitId() const { return exit_id_; }
protected:
- BreakableStatement(
- Zone* zone, ZoneList<const AstRawString*>* labels,
- BreakableType breakable_type, int position)
+ BreakableStatement(Zone* zone, ZoneList<const AstRawString*>* labels,
+ BreakableType breakable_type, int position, IdGen* id_gen)
: Statement(zone, position),
labels_(labels),
breakable_type_(breakable_type),
- entry_id_(GetNextId(zone)),
- exit_id_(GetNextId(zone)) {
+ entry_id_(id_gen->GetNextId()),
+ exit_id_(id_gen->GetNextId()) {
DCHECK(labels == NULL || labels->length() > 0);
}
@@ -441,7 +446,7 @@ class BreakableStatement : public Statement {
};
-class Block V8_FINAL : public BreakableStatement {
+class Block FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
@@ -454,7 +459,7 @@ class Block V8_FINAL : public BreakableStatement {
BailoutId DeclsId() const { return decls_id_; }
- virtual bool IsJump() const V8_OVERRIDE {
+ virtual bool IsJump() const OVERRIDE {
return !statements_.is_empty() && statements_.last()->IsJump()
&& labels() == NULL; // Good enough as an approximation...
}
@@ -463,17 +468,13 @@ class Block V8_FINAL : public BreakableStatement {
void set_scope(Scope* scope) { scope_ = scope; }
protected:
- Block(Zone* zone,
- ZoneList<const AstRawString*>* labels,
- int capacity,
- bool is_initializer_block,
- int pos)
- : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos),
+ Block(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
+ bool is_initializer_block, int pos, IdGen* id_gen)
+ : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos, id_gen),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
- decls_id_(GetNextId(zone)),
- scope_(NULL) {
- }
+ decls_id_(id_gen->GetNextId()),
+ scope_(NULL) {}
private:
ZoneList<Statement*> statements_;
@@ -513,11 +514,11 @@ class Declaration : public AstNode {
};
-class VariableDeclaration V8_FINAL : public Declaration {
+class VariableDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(VariableDeclaration)
- virtual InitializationFlag initialization() const V8_OVERRIDE {
+ virtual InitializationFlag initialization() const OVERRIDE {
return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
}
@@ -532,15 +533,15 @@ class VariableDeclaration V8_FINAL : public Declaration {
};
-class FunctionDeclaration V8_FINAL : public Declaration {
+class FunctionDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
- virtual InitializationFlag initialization() const V8_OVERRIDE {
+ virtual InitializationFlag initialization() const OVERRIDE {
return kCreatedInitialized;
}
- virtual bool IsInlineable() const V8_OVERRIDE;
+ virtual bool IsInlineable() const OVERRIDE;
protected:
FunctionDeclaration(Zone* zone,
@@ -561,12 +562,12 @@ class FunctionDeclaration V8_FINAL : public Declaration {
};
-class ModuleDeclaration V8_FINAL : public Declaration {
+class ModuleDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ModuleDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const V8_OVERRIDE {
+ virtual InitializationFlag initialization() const OVERRIDE {
return kCreatedInitialized;
}
@@ -585,12 +586,12 @@ class ModuleDeclaration V8_FINAL : public Declaration {
};
-class ImportDeclaration V8_FINAL : public Declaration {
+class ImportDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const V8_OVERRIDE {
+ virtual InitializationFlag initialization() const OVERRIDE {
return kCreatedInitialized;
}
@@ -609,11 +610,11 @@ class ImportDeclaration V8_FINAL : public Declaration {
};
-class ExportDeclaration V8_FINAL : public Declaration {
+class ExportDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ExportDeclaration)
- virtual InitializationFlag initialization() const V8_OVERRIDE {
+ virtual InitializationFlag initialization() const OVERRIDE {
return kCreatedInitialized;
}
@@ -644,7 +645,7 @@ class Module : public AstNode {
};
-class ModuleLiteral V8_FINAL : public Module {
+class ModuleLiteral FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleLiteral)
@@ -654,7 +655,7 @@ class ModuleLiteral V8_FINAL : public Module {
};
-class ModuleVariable V8_FINAL : public Module {
+class ModuleVariable FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleVariable)
@@ -668,7 +669,7 @@ class ModuleVariable V8_FINAL : public Module {
};
-class ModulePath V8_FINAL : public Module {
+class ModulePath FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModulePath)
@@ -685,7 +686,7 @@ class ModulePath V8_FINAL : public Module {
};
-class ModuleUrl V8_FINAL : public Module {
+class ModuleUrl FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleUrl)
@@ -701,7 +702,7 @@ class ModuleUrl V8_FINAL : public Module {
};
-class ModuleStatement V8_FINAL : public Statement {
+class ModuleStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
@@ -724,7 +725,7 @@ class ModuleStatement V8_FINAL : public Statement {
class IterationStatement : public BreakableStatement {
public:
// Type testing & conversion.
- virtual IterationStatement* AsIterationStatement() V8_FINAL V8_OVERRIDE {
+ virtual IterationStatement* AsIterationStatement() FINAL OVERRIDE {
return this;
}
@@ -738,11 +739,11 @@ class IterationStatement : public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
+ IterationStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos, id_gen),
body_(NULL),
- osr_entry_id_(GetNextId(zone)) {
- }
+ osr_entry_id_(id_gen->GetNextId()) {}
void Initialize(Statement* body) {
body_ = body;
@@ -756,7 +757,7 @@ class IterationStatement : public BreakableStatement {
};
-class DoWhileStatement V8_FINAL : public IterationStatement {
+class DoWhileStatement FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -767,17 +768,17 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
Expression* cond() const { return cond_; }
- virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
- virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
+ virtual BailoutId ContinueId() const OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const OVERRIDE { return back_edge_id_; }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- DoWhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos),
+ DoWhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : IterationStatement(zone, labels, pos, id_gen),
cond_(NULL),
- continue_id_(GetNextId(zone)),
- back_edge_id_(GetNextId(zone)) {
- }
+ continue_id_(id_gen->GetNextId()),
+ back_edge_id_(id_gen->GetNextId()) {}
private:
Expression* cond_;
@@ -787,7 +788,7 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
};
-class WhileStatement V8_FINAL : public IterationStatement {
+class WhileStatement FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(WhileStatement)
@@ -804,17 +805,17 @@ class WhileStatement V8_FINAL : public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
- virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
+ virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
protected:
- WhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos),
+ WhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : IterationStatement(zone, labels, pos, id_gen),
cond_(NULL),
may_have_function_literal_(true),
- body_id_(GetNextId(zone)) {
- }
+ body_id_(id_gen->GetNextId()) {}
private:
Expression* cond_;
@@ -826,7 +827,7 @@ class WhileStatement V8_FINAL : public IterationStatement {
};
-class ForStatement V8_FINAL : public IterationStatement {
+class ForStatement FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(ForStatement)
@@ -851,8 +852,8 @@ class ForStatement V8_FINAL : public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
- virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
+ virtual BailoutId ContinueId() const OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -860,16 +861,16 @@ class ForStatement V8_FINAL : public IterationStatement {
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- ForStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos),
+ ForStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : IterationStatement(zone, labels, pos, id_gen),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
- continue_id_(GetNextId(zone)),
- body_id_(GetNextId(zone)) {
- }
+ continue_id_(id_gen->GetNextId()),
+ body_id_(id_gen->GetNextId()) {}
private:
Statement* init_;
@@ -902,8 +903,11 @@ class ForEachStatement : public IterationStatement {
Expression* subject() const { return subject_; }
protected:
- ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
+ ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : IterationStatement(zone, labels, pos, id_gen),
+ each_(NULL),
+ subject_(NULL) {}
private:
Expression* each_;
@@ -911,7 +915,7 @@ class ForEachStatement : public IterationStatement {
};
-class ForInStatement V8_FINAL : public ForEachStatement,
+class ForInStatement FINAL : public ForEachStatement,
public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -935,17 +939,17 @@ class ForInStatement V8_FINAL : public ForEachStatement,
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
- virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
- virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
+ virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const OVERRIDE { return body_id_; }
protected:
- ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : ForEachStatement(zone, labels, pos),
+ ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : ForEachStatement(zone, labels, pos, id_gen),
for_in_type_(SLOW_FOR_IN),
for_in_feedback_slot_(kInvalidFeedbackSlot),
- body_id_(GetNextId(zone)),
- prepare_id_(GetNextId(zone)) {
- }
+ body_id_(id_gen->GetNextId()),
+ prepare_id_(id_gen->GetNextId()) {}
ForInType for_in_type_;
int for_in_feedback_slot_;
@@ -954,7 +958,7 @@ class ForInStatement V8_FINAL : public ForEachStatement,
};
-class ForOfStatement V8_FINAL : public ForEachStatement {
+class ForOfStatement FINAL : public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForOfStatement)
@@ -996,20 +1000,20 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
return assign_each_;
}
- virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
- virtual BailoutId StackCheckId() const V8_OVERRIDE { return BackEdgeId(); }
+ virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const OVERRIDE { return BackEdgeId(); }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : ForEachStatement(zone, labels, pos),
+ ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : ForEachStatement(zone, labels, pos, id_gen),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL),
- back_edge_id_(GetNextId(zone)) {
- }
+ back_edge_id_(id_gen->GetNextId()) {}
Expression* assign_iterator_;
Expression* next_result_;
@@ -1019,13 +1023,13 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
};
-class ExpressionStatement V8_FINAL : public Statement {
+class ExpressionStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
- virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
+ virtual bool IsJump() const OVERRIDE { return expression_->IsThrow(); }
protected:
ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -1038,14 +1042,14 @@ class ExpressionStatement V8_FINAL : public Statement {
class JumpStatement : public Statement {
public:
- virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsJump() const FINAL OVERRIDE { return true; }
protected:
explicit JumpStatement(Zone* zone, int pos) : Statement(zone, pos) {}
};
-class ContinueStatement V8_FINAL : public JumpStatement {
+class ContinueStatement FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ContinueStatement)
@@ -1060,7 +1064,7 @@ class ContinueStatement V8_FINAL : public JumpStatement {
};
-class BreakStatement V8_FINAL : public JumpStatement {
+class BreakStatement FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(BreakStatement)
@@ -1075,7 +1079,7 @@ class BreakStatement V8_FINAL : public JumpStatement {
};
-class ReturnStatement V8_FINAL : public JumpStatement {
+class ReturnStatement FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ReturnStatement)
@@ -1090,7 +1094,7 @@ class ReturnStatement V8_FINAL : public JumpStatement {
};
-class WithStatement V8_FINAL : public Statement {
+class WithStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(WithStatement)
@@ -1114,7 +1118,7 @@ class WithStatement V8_FINAL : public Statement {
};
-class CaseClause V8_FINAL : public Expression {
+class CaseClause FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CaseClause)
@@ -1134,10 +1138,8 @@ class CaseClause V8_FINAL : public Expression {
void set_compare_type(Type* type) { compare_type_ = type; }
private:
- CaseClause(Zone* zone,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos);
+ CaseClause(Zone* zone, Expression* label, ZoneList<Statement*>* statements,
+ int pos, IdGen* id_gen);
Expression* label_;
Label body_target_;
@@ -1149,7 +1151,7 @@ class CaseClause V8_FINAL : public Expression {
};
-class SwitchStatement V8_FINAL : public BreakableStatement {
+class SwitchStatement FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(SwitchStatement)
@@ -1162,10 +1164,11 @@ class SwitchStatement V8_FINAL : public BreakableStatement {
ZoneList<CaseClause*>* cases() const { return cases_; }
protected:
- SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
+ SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+ IdGen* id_gen)
+ : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos, id_gen),
tag_(NULL),
- cases_(NULL) { }
+ cases_(NULL) {}
private:
Expression* tag_;
@@ -1178,7 +1181,7 @@ class SwitchStatement V8_FINAL : public BreakableStatement {
// the parser implicitly creates an empty statement. Use the
// HasThenStatement() and HasElseStatement() functions to check if a
// given if-statement has a then- or an else-part containing code.
-class IfStatement V8_FINAL : public Statement {
+class IfStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(IfStatement)
@@ -1189,7 +1192,7 @@ class IfStatement V8_FINAL : public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
- virtual bool IsJump() const V8_OVERRIDE {
+ virtual bool IsJump() const OVERRIDE {
return HasThenStatement() && then_statement()->IsJump()
&& HasElseStatement() && else_statement()->IsJump();
}
@@ -1199,19 +1202,15 @@ class IfStatement V8_FINAL : public Statement {
BailoutId ElseId() const { return else_id_; }
protected:
- IfStatement(Zone* zone,
- Expression* condition,
- Statement* then_statement,
- Statement* else_statement,
- int pos)
+ IfStatement(Zone* zone, Expression* condition, Statement* then_statement,
+ Statement* else_statement, int pos, IdGen* id_gen)
: Statement(zone, pos),
condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
- if_id_(GetNextId(zone)),
- then_id_(GetNextId(zone)),
- else_id_(GetNextId(zone)) {
- }
+ if_id_(id_gen->GetNextId()),
+ then_id_(id_gen->GetNextId()),
+ else_id_(id_gen->GetNextId()) {}
private:
Expression* condition_;
@@ -1225,7 +1224,7 @@ class IfStatement V8_FINAL : public Statement {
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
-class TargetCollector V8_FINAL : public AstNode {
+class TargetCollector FINAL : public AstNode {
public:
explicit TargetCollector(Zone* zone)
: AstNode(RelocInfo::kNoPosition), targets_(0, zone) { }
@@ -1236,9 +1235,9 @@ class TargetCollector V8_FINAL : public AstNode {
void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
- virtual void Accept(AstVisitor* v) V8_OVERRIDE { UNREACHABLE(); }
- virtual NodeType node_type() const V8_OVERRIDE { return kInvalid; }
- virtual TargetCollector* AsTargetCollector() V8_OVERRIDE { return this; }
+ virtual void Accept(AstVisitor* v) OVERRIDE { UNREACHABLE(); }
+ virtual NodeType node_type() const OVERRIDE { return kInvalid; }
+ virtual TargetCollector* AsTargetCollector() OVERRIDE { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1273,7 +1272,7 @@ class TryStatement : public Statement {
};
-class TryCatchStatement V8_FINAL : public TryStatement {
+class TryCatchStatement FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryCatchStatement)
@@ -1302,7 +1301,7 @@ class TryCatchStatement V8_FINAL : public TryStatement {
};
-class TryFinallyStatement V8_FINAL : public TryStatement {
+class TryFinallyStatement FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryFinallyStatement)
@@ -1319,16 +1318,22 @@ class TryFinallyStatement V8_FINAL : public TryStatement {
};
-class DebuggerStatement V8_FINAL : public Statement {
+class DebuggerStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(DebuggerStatement)
+ BailoutId DebugBreakId() const { return debugger_id_; }
+
protected:
- explicit DebuggerStatement(Zone* zone, int pos): Statement(zone, pos) {}
+ explicit DebuggerStatement(Zone* zone, int pos, IdGen* id_gen)
+ : Statement(zone, pos), debugger_id_(id_gen->GetNextId()) {}
+
+ private:
+ const BailoutId debugger_id_;
};
-class EmptyStatement V8_FINAL : public Statement {
+class EmptyStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(EmptyStatement)
@@ -1337,11 +1342,11 @@ class EmptyStatement V8_FINAL : public Statement {
};
-class Literal V8_FINAL : public Expression {
+class Literal FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Literal)
- virtual bool IsPropertyName() const V8_OVERRIDE {
+ virtual bool IsPropertyName() const OVERRIDE {
return value_->IsPropertyName();
}
@@ -1355,10 +1360,10 @@ class Literal V8_FINAL : public Expression {
return value_->AsString();
}
- virtual bool ToBooleanIsTrue() const V8_OVERRIDE {
+ virtual bool ToBooleanIsTrue() const OVERRIDE {
return value()->BooleanValue();
}
- virtual bool ToBooleanIsFalse() const V8_OVERRIDE {
+ virtual bool ToBooleanIsFalse() const OVERRIDE {
return !value()->BooleanValue();
}
@@ -1378,10 +1383,10 @@ class Literal V8_FINAL : public Expression {
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(Zone* zone, const AstValue* value, int position)
- : Expression(zone, position),
+ Literal(Zone* zone, const AstValue* value, int position, IdGen* id_gen)
+ : Expression(zone, position, id_gen),
value_(value),
- isolate_(zone->isolate()) { }
+ isolate_(zone->isolate()) {}
private:
Handle<String> ToString();
@@ -1406,10 +1411,8 @@ class MaterializedLiteral : public Expression {
}
protected:
- MaterializedLiteral(Zone* zone,
- int literal_index,
- int pos)
- : Expression(zone, pos),
+ MaterializedLiteral(Zone* zone, int literal_index, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
literal_index_(literal_index),
is_simple_(false),
depth_(0) {}
@@ -1447,7 +1450,7 @@ class MaterializedLiteral : public Expression {
// Property is used for passing information
// about an object literal's properties from the parser
// to the code generator.
-class ObjectLiteralProperty V8_FINAL : public ZoneObject {
+class ObjectLiteralProperty FINAL : public ZoneObject {
public:
enum Kind {
CONSTANT, // Property with constant value (compile time).
@@ -1458,7 +1461,7 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
};
ObjectLiteralProperty(Zone* zone, AstValueFactory* ast_value_factory,
- Literal* key, Expression* value);
+ Literal* key, Expression* value, bool is_static);
Literal* key() { return key_; }
Expression* value() { return value_; }
@@ -1477,7 +1480,8 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
protected:
template<class> friend class AstNodeFactory;
- ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value);
+ ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value,
+ bool is_static);
void set_key(Literal* key) { key_ = key; }
private:
@@ -1485,13 +1489,14 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
Expression* value_;
Kind kind_;
bool emit_store_;
+ bool is_static_;
Handle<Map> receiver_type_;
};
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
-class ObjectLiteral V8_FINAL : public MaterializedLiteral {
+class ObjectLiteral FINAL : public MaterializedLiteral {
public:
typedef ObjectLiteralProperty Property;
@@ -1536,13 +1541,10 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
};
protected:
- ObjectLiteral(Zone* zone,
- ZoneList<Property*>* properties,
- int literal_index,
- int boilerplate_properties,
- bool has_function,
- int pos)
- : MaterializedLiteral(zone, literal_index, pos),
+ ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
+ int boilerplate_properties, bool has_function, int pos,
+ IdGen* id_gen)
+ : MaterializedLiteral(zone, literal_index, pos, id_gen),
properties_(properties),
boilerplate_properties_(boilerplate_properties),
fast_elements_(false),
@@ -1560,7 +1562,7 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
// Node for capturing a regexp literal.
-class RegExpLiteral V8_FINAL : public MaterializedLiteral {
+class RegExpLiteral FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(RegExpLiteral)
@@ -1568,12 +1570,10 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
Handle<String> flags() const { return flags_->string(); }
protected:
- RegExpLiteral(Zone* zone,
- const AstRawString* pattern,
- const AstRawString* flags,
- int literal_index,
- int pos)
- : MaterializedLiteral(zone, literal_index, pos),
+ RegExpLiteral(Zone* zone, const AstRawString* pattern,
+ const AstRawString* flags, int literal_index, int pos,
+ IdGen* id_gen)
+ : MaterializedLiteral(zone, literal_index, pos, id_gen),
pattern_(pattern),
flags_(flags) {
set_depth(1);
@@ -1587,7 +1587,7 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
-class ArrayLiteral V8_FINAL : public MaterializedLiteral {
+class ArrayLiteral FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(ArrayLiteral)
@@ -1616,13 +1616,11 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
};
protected:
- ArrayLiteral(Zone* zone,
- ZoneList<Expression*>* values,
- int literal_index,
- int pos)
- : MaterializedLiteral(zone, literal_index, pos),
+ ArrayLiteral(Zone* zone, ZoneList<Expression*>* values, int literal_index,
+ int pos, IdGen* id_gen)
+ : MaterializedLiteral(zone, literal_index, pos, id_gen),
values_(values),
- first_element_id_(ReserveIdRange(zone, values->length())) {}
+ first_element_id_(id_gen->ReserveIdRange(values->length())) {}
private:
Handle<FixedArray> constant_elements_;
@@ -1631,11 +1629,11 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
};
-class VariableProxy V8_FINAL : public Expression, public FeedbackSlotInterface {
+class VariableProxy FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(VariableProxy)
- virtual bool IsValidReferenceExpression() const V8_OVERRIDE {
+ virtual bool IsValidReferenceExpression() const OVERRIDE {
return var_ == NULL ? true : var_->IsValidReference();
}
@@ -1661,13 +1659,10 @@ class VariableProxy V8_FINAL : public Expression, public FeedbackSlotInterface {
int VariableFeedbackSlot() { return variable_feedback_slot_; }
protected:
- VariableProxy(Zone* zone, Variable* var, int position);
+ VariableProxy(Zone* zone, Variable* var, int position, IdGen* id_gen);
- VariableProxy(Zone* zone,
- const AstRawString* name,
- bool is_this,
- Interface* interface,
- int position);
+ VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
+ Interface* interface, int position, IdGen* id_gen);
const AstRawString* name_;
Variable* var_; // resolved variable, or NULL
@@ -1678,11 +1673,11 @@ class VariableProxy V8_FINAL : public Expression, public FeedbackSlotInterface {
};
-class Property V8_FINAL : public Expression, public FeedbackSlotInterface {
+class Property FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Property)
- virtual bool IsValidReferenceExpression() const V8_OVERRIDE { return true; }
+ virtual bool IsValidReferenceExpression() const OVERRIDE { return true; }
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
@@ -1692,13 +1687,13 @@ class Property V8_FINAL : public Expression, public FeedbackSlotInterface {
bool IsStringAccess() const { return is_string_access_; }
// Type feedback information.
- virtual bool IsMonomorphic() V8_OVERRIDE {
+ virtual bool IsMonomorphic() OVERRIDE {
return receiver_types_.length() == 1;
}
- virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ virtual SmallMapList* GetReceiverTypes() OVERRIDE {
return &receiver_types_;
}
- virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+ virtual KeyedAccessStoreMode GetStoreMode() OVERRIDE {
return STANDARD_STORE;
}
bool IsUninitialized() { return !is_for_call_ && is_uninitialized_; }
@@ -1710,6 +1705,10 @@ class Property V8_FINAL : public Expression, public FeedbackSlotInterface {
void mark_for_call() { is_for_call_ = true; }
bool IsForCall() { return is_for_call_; }
+ bool IsSuperAccess() {
+ return obj()->IsSuperReference();
+ }
+
TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
virtual int ComputeFeedbackSlotCount() { return FLAG_vector_ics ? 1 : 0; }
@@ -1720,11 +1719,11 @@ class Property V8_FINAL : public Expression, public FeedbackSlotInterface {
int PropertyFeedbackSlot() const { return property_feedback_slot_; }
protected:
- Property(Zone* zone, Expression* obj, Expression* key, int pos)
- : Expression(zone, pos),
+ Property(Zone* zone, Expression* obj, Expression* key, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
obj_(obj),
key_(key),
- load_id_(GetNextId(zone)),
+ load_id_(id_gen->GetNextId()),
property_feedback_slot_(kInvalidFeedbackSlot),
is_for_call_(false),
is_uninitialized_(false),
@@ -1743,7 +1742,7 @@ class Property V8_FINAL : public Expression, public FeedbackSlotInterface {
};
-class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
+class Call FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Call)
@@ -1761,14 +1760,14 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
}
int CallFeedbackSlot() const { return call_feedback_slot_; }
- virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ virtual SmallMapList* GetReceiverTypes() OVERRIDE {
if (expression()->IsProperty()) {
return expression()->AsProperty()->GetReceiverTypes();
}
return NULL;
}
- virtual bool IsMonomorphic() V8_OVERRIDE {
+ virtual bool IsMonomorphic() OVERRIDE {
if (expression()->IsProperty()) {
return expression()->AsProperty()->IsMonomorphic();
}
@@ -1794,7 +1793,7 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
void set_allocation_site(Handle<AllocationSite> site) {
allocation_site_ = site;
}
- bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
+ bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupIterator* it);
BailoutId ReturnId() const { return return_id_; }
@@ -1816,15 +1815,13 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
#endif
protected:
- Call(Zone* zone,
- Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : Expression(zone, pos),
+ Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
+ int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
expression_(expression),
arguments_(arguments),
call_feedback_slot_(kInvalidFeedbackSlot),
- return_id_(GetNextId(zone)) {
+ return_id_(id_gen->GetNextId()) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
}
@@ -1843,7 +1840,7 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
};
-class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
+class CallNew FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallNew)
@@ -1869,9 +1866,8 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
}
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
- ElementsKind elements_kind() const { return elements_kind_; }
Handle<AllocationSite> allocation_site() const {
return allocation_site_;
}
@@ -1881,17 +1877,14 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
BailoutId ReturnId() const { return return_id_; }
protected:
- CallNew(Zone* zone,
- Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : Expression(zone, pos),
+ CallNew(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
+ int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
expression_(expression),
arguments_(arguments),
is_monomorphic_(false),
- elements_kind_(GetInitialFastElementsKind()),
callnew_feedback_slot_(kInvalidFeedbackSlot),
- return_id_(GetNextId(zone)) { }
+ return_id_(id_gen->GetNextId()) {}
private:
Expression* expression_;
@@ -1899,7 +1892,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
bool is_monomorphic_;
Handle<JSFunction> target_;
- ElementsKind elements_kind_;
Handle<AllocationSite> allocation_site_;
int callnew_feedback_slot_;
@@ -1911,7 +1903,7 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
// implemented in JavaScript (see "v8natives.js").
-class CallRuntime V8_FINAL : public Expression, public FeedbackSlotInterface {
+class CallRuntime FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallRuntime)
@@ -1938,15 +1930,13 @@ class CallRuntime V8_FINAL : public Expression, public FeedbackSlotInterface {
TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
protected:
- CallRuntime(Zone* zone,
- const AstRawString* name,
+ CallRuntime(Zone* zone, const AstRawString* name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments,
- int pos)
- : Expression(zone, pos),
+ ZoneList<Expression*>* arguments, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
raw_name_(name),
function_(function),
- arguments_(arguments) { }
+ arguments_(arguments) {}
private:
const AstRawString* raw_name_;
@@ -1956,7 +1946,7 @@ class CallRuntime V8_FINAL : public Expression, public FeedbackSlotInterface {
};
-class UnaryOperation V8_FINAL : public Expression {
+class UnaryOperation FINAL : public Expression {
public:
DECLARE_NODE_TYPE(UnaryOperation)
@@ -1967,18 +1957,16 @@ class UnaryOperation V8_FINAL : public Expression {
BailoutId MaterializeFalseId() { return materialize_false_id_; }
virtual void RecordToBooleanTypeFeedback(
- TypeFeedbackOracle* oracle) V8_OVERRIDE;
+ TypeFeedbackOracle* oracle) OVERRIDE;
protected:
- UnaryOperation(Zone* zone,
- Token::Value op,
- Expression* expression,
- int pos)
- : Expression(zone, pos),
+ UnaryOperation(Zone* zone, Token::Value op, Expression* expression, int pos,
+ IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
op_(op),
expression_(expression),
- materialize_true_id_(GetNextId(zone)),
- materialize_false_id_(GetNextId(zone)) {
+ materialize_true_id_(id_gen->GetNextId()),
+ materialize_false_id_(id_gen->GetNextId()) {
DCHECK(Token::IsUnaryOp(op));
}
@@ -1993,11 +1981,11 @@ class UnaryOperation V8_FINAL : public Expression {
};
-class BinaryOperation V8_FINAL : public Expression {
+class BinaryOperation FINAL : public Expression {
public:
DECLARE_NODE_TYPE(BinaryOperation)
- virtual bool ResultOverwriteAllowed() const V8_OVERRIDE;
+ virtual bool ResultOverwriteAllowed() const OVERRIDE;
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
@@ -2014,19 +2002,16 @@ class BinaryOperation V8_FINAL : public Expression {
void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
virtual void RecordToBooleanTypeFeedback(
- TypeFeedbackOracle* oracle) V8_OVERRIDE;
+ TypeFeedbackOracle* oracle) OVERRIDE;
protected:
- BinaryOperation(Zone* zone,
- Token::Value op,
- Expression* left,
- Expression* right,
- int pos)
- : Expression(zone, pos),
+ BinaryOperation(Zone* zone, Token::Value op, Expression* left,
+ Expression* right, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
op_(op),
left_(left),
right_(right),
- right_id_(GetNextId(zone)) {
+ right_id_(id_gen->GetNextId()) {
DCHECK(Token::IsBinaryOp(op));
}
@@ -2046,7 +2031,7 @@ class BinaryOperation V8_FINAL : public Expression {
};
-class CountOperation V8_FINAL : public Expression {
+class CountOperation FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CountOperation)
@@ -2060,13 +2045,13 @@ class CountOperation V8_FINAL : public Expression {
Expression* expression() const { return expression_; }
- virtual bool IsMonomorphic() V8_OVERRIDE {
+ virtual bool IsMonomorphic() OVERRIDE {
return receiver_types_.length() == 1;
}
- virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ virtual SmallMapList* GetReceiverTypes() OVERRIDE {
return &receiver_types_;
}
- virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+ virtual KeyedAccessStoreMode GetStoreMode() OVERRIDE {
return store_mode_;
}
Type* type() const { return type_; }
@@ -2079,18 +2064,15 @@ class CountOperation V8_FINAL : public Expression {
TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
protected:
- CountOperation(Zone* zone,
- Token::Value op,
- bool is_prefix,
- Expression* expr,
- int pos)
- : Expression(zone, pos),
+ CountOperation(Zone* zone, Token::Value op, bool is_prefix, Expression* expr,
+ int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
op_(op),
is_prefix_(is_prefix),
store_mode_(STANDARD_STORE),
expression_(expr),
- assignment_id_(GetNextId(zone)),
- count_id_(GetNextId(zone)) {}
+ assignment_id_(id_gen->GetNextId()),
+ count_id_(id_gen->GetNextId()) {}
private:
Token::Value op_;
@@ -2106,7 +2088,7 @@ class CountOperation V8_FINAL : public Expression {
};
-class CompareOperation V8_FINAL : public Expression {
+class CompareOperation FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CompareOperation)
@@ -2125,12 +2107,9 @@ class CompareOperation V8_FINAL : public Expression {
bool IsLiteralCompareNull(Expression** expr);
protected:
- CompareOperation(Zone* zone,
- Token::Value op,
- Expression* left,
- Expression* right,
- int pos)
- : Expression(zone, pos),
+ CompareOperation(Zone* zone, Token::Value op, Expression* left,
+ Expression* right, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
op_(op),
left_(left),
right_(right),
@@ -2147,7 +2126,7 @@ class CompareOperation V8_FINAL : public Expression {
};
-class Conditional V8_FINAL : public Expression {
+class Conditional FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Conditional)
@@ -2159,17 +2138,14 @@ class Conditional V8_FINAL : public Expression {
BailoutId ElseId() const { return else_id_; }
protected:
- Conditional(Zone* zone,
- Expression* condition,
- Expression* then_expression,
- Expression* else_expression,
- int position)
- : Expression(zone, position),
+ Conditional(Zone* zone, Expression* condition, Expression* then_expression,
+ Expression* else_expression, int position, IdGen* id_gen)
+ : Expression(zone, position, id_gen),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
- then_id_(GetNextId(zone)),
- else_id_(GetNextId(zone)) { }
+ then_id_(id_gen->GetNextId()),
+ else_id_(id_gen->GetNextId()) {}
private:
Expression* condition_;
@@ -2180,7 +2156,7 @@ class Conditional V8_FINAL : public Expression {
};
-class Assignment V8_FINAL : public Expression {
+class Assignment FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Assignment)
@@ -2200,28 +2176,25 @@ class Assignment V8_FINAL : public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
- virtual bool IsMonomorphic() V8_OVERRIDE {
+ virtual bool IsMonomorphic() OVERRIDE {
return receiver_types_.length() == 1;
}
bool IsUninitialized() { return is_uninitialized_; }
bool HasNoTypeInformation() {
return is_uninitialized_;
}
- virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ virtual SmallMapList* GetReceiverTypes() OVERRIDE {
return &receiver_types_;
}
- virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+ virtual KeyedAccessStoreMode GetStoreMode() OVERRIDE {
return store_mode_;
}
void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
protected:
- Assignment(Zone* zone,
- Token::Value op,
- Expression* target,
- Expression* value,
- int pos);
+ Assignment(Zone* zone, Token::Value op, Expression* target, Expression* value,
+ int pos, IdGen* id_gen);
template<class Visitor>
void Init(Zone* zone, AstNodeFactory<Visitor>* factory) {
@@ -2246,15 +2219,15 @@ class Assignment V8_FINAL : public Expression {
};
-class Yield V8_FINAL : public Expression, public FeedbackSlotInterface {
+class Yield FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Yield)
enum Kind {
- INITIAL, // The initial yield that returns the unboxed generator object.
- SUSPEND, // A normal yield: { value: EXPRESSION, done: false }
- DELEGATING, // A yield*.
- FINAL // A return: { value: EXPRESSION, done: true }
+ kInitial, // The initial yield that returns the unboxed generator object.
+ kSuspend, // A normal yield: { value: EXPRESSION, done: false }
+ kDelegating, // A yield*.
+ kFinal // A return: { value: EXPRESSION, done: true }
};
Expression* generator_object() const { return generator_object_; }
@@ -2265,17 +2238,17 @@ class Yield V8_FINAL : public Expression, public FeedbackSlotInterface {
// locates the catch handler in the handler table, and is equivalent to
// TryCatchStatement::index().
int index() const {
- DCHECK(yield_kind() == DELEGATING);
+ DCHECK_EQ(kDelegating, yield_kind());
return index_;
}
void set_index(int index) {
- DCHECK(yield_kind() == DELEGATING);
+ DCHECK_EQ(kDelegating, yield_kind());
index_ = index;
}
// Type feedback information.
virtual int ComputeFeedbackSlotCount() {
- return (FLAG_vector_ics && yield_kind() == DELEGATING) ? 3 : 0;
+ return (FLAG_vector_ics && yield_kind() == kDelegating) ? 3 : 0;
}
virtual void SetFirstFeedbackSlot(int slot) {
yield_first_feedback_slot_ = slot;
@@ -2297,17 +2270,14 @@ class Yield V8_FINAL : public Expression, public FeedbackSlotInterface {
}
protected:
- Yield(Zone* zone,
- Expression* generator_object,
- Expression* expression,
- Kind yield_kind,
- int pos)
- : Expression(zone, pos),
+ Yield(Zone* zone, Expression* generator_object, Expression* expression,
+ Kind yield_kind, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen),
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
index_(-1),
- yield_first_feedback_slot_(kInvalidFeedbackSlot) { }
+ yield_first_feedback_slot_(kInvalidFeedbackSlot) {}
private:
Expression* generator_object_;
@@ -2318,22 +2288,22 @@ class Yield V8_FINAL : public Expression, public FeedbackSlotInterface {
};
-class Throw V8_FINAL : public Expression {
+class Throw FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
protected:
- Throw(Zone* zone, Expression* exception, int pos)
- : Expression(zone, pos), exception_(exception) {}
+ Throw(Zone* zone, Expression* exception, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen), exception_(exception) {}
private:
Expression* exception_;
};
-class FunctionLiteral V8_FINAL : public Expression {
+class FunctionLiteral FINAL : public Expression {
public:
enum FunctionType {
ANONYMOUS_EXPRESSION,
@@ -2356,12 +2326,6 @@ class FunctionLiteral V8_FINAL : public Expression {
kNotParenthesized
};
- enum KindFlag {
- kNormalFunction,
- kArrowFunction,
- kGeneratorFunction
- };
-
enum ArityRestriction {
NORMAL_ARITY,
GETTER_ARITY,
@@ -2451,8 +2415,16 @@ class FunctionLiteral V8_FINAL : public Expression {
bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
}
- bool is_generator() { return IsGenerator::decode(bitfield_); }
- bool is_arrow() { return IsArrow::decode(bitfield_); }
+ FunctionKind kind() { return FunctionKindBits::decode(bitfield_); }
+ bool is_arrow() {
+ return IsArrowFunction(FunctionKindBits::decode(bitfield_));
+ }
+ bool is_generator() {
+ return IsGeneratorFunction(FunctionKindBits::decode(bitfield_));
+ }
+ bool is_concise_method() {
+ return IsConciseMethod(FunctionKindBits::decode(bitfield_));
+ }
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
@@ -2476,9 +2448,9 @@ class FunctionLiteral V8_FINAL : public Expression {
int parameter_count, FunctionType function_type,
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
- IsParenthesizedFlag is_parenthesized, KindFlag kind,
- int position)
- : Expression(zone, position),
+ IsParenthesizedFlag is_parenthesized, FunctionKind kind,
+ int position, IdGen* id_gen)
+ : Expression(zone, position, id_gen),
raw_name_(name),
scope_(scope),
body_(body),
@@ -2495,8 +2467,8 @@ class FunctionLiteral V8_FINAL : public Expression {
HasDuplicateParameters::encode(has_duplicate_parameters) |
IsFunction::encode(is_function) |
IsParenthesized::encode(is_parenthesized) |
- IsGenerator::encode(kind == kGeneratorFunction) |
- IsArrow::encode(kind == kArrowFunction);
+ FunctionKindBits::encode(kind);
+ DCHECK(IsValidFunctionKind(kind));
}
private:
@@ -2517,18 +2489,47 @@ class FunctionLiteral V8_FINAL : public Expression {
int function_token_position_;
unsigned bitfield_;
- class IsExpression: public BitField<bool, 0, 1> {};
- class IsAnonymous: public BitField<bool, 1, 1> {};
- class Pretenure: public BitField<bool, 2, 1> {};
- class HasDuplicateParameters: public BitField<ParameterFlag, 3, 1> {};
- class IsFunction: public BitField<IsFunctionFlag, 4, 1> {};
- class IsParenthesized: public BitField<IsParenthesizedFlag, 5, 1> {};
- class IsGenerator : public BitField<bool, 6, 1> {};
- class IsArrow : public BitField<bool, 7, 1> {};
+ class IsExpression : public BitField<bool, 0, 1> {};
+ class IsAnonymous : public BitField<bool, 1, 1> {};
+ class Pretenure : public BitField<bool, 2, 1> {};
+ class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
+ class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
+ class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
+ class FunctionKindBits : public BitField<FunctionKind, 6, 3> {};
};
-class NativeFunctionLiteral V8_FINAL : public Expression {
+class ClassLiteral FINAL : public Expression {
+ public:
+ typedef ObjectLiteralProperty Property;
+
+ DECLARE_NODE_TYPE(ClassLiteral)
+
+ Handle<String> name() const { return raw_name_->string(); }
+ const AstRawString* raw_name() const { return raw_name_; }
+ Expression* extends() const { return extends_; }
+ Expression* constructor() const { return constructor_; }
+ ZoneList<Property*>* properties() const { return properties_; }
+
+ protected:
+ ClassLiteral(Zone* zone, const AstRawString* name, Expression* extends,
+ Expression* constructor, ZoneList<Property*>* properties,
+ int position, IdGen* id_gen)
+ : Expression(zone, position, id_gen),
+ raw_name_(name),
+ extends_(extends),
+ constructor_(constructor),
+ properties_(properties) {}
+
+ private:
+ const AstRawString* raw_name_;
+ Expression* extends_;
+ Expression* constructor_;
+ ZoneList<Property*>* properties_;
+};
+
+
+class NativeFunctionLiteral FINAL : public Expression {
public:
DECLARE_NODE_TYPE(NativeFunctionLiteral)
@@ -2537,8 +2538,8 @@ class NativeFunctionLiteral V8_FINAL : public Expression {
protected:
NativeFunctionLiteral(Zone* zone, const AstRawString* name,
- v8::Extension* extension, int pos)
- : Expression(zone, pos), name_(name), extension_(extension) {}
+ v8::Extension* extension, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen), name_(name), extension_(extension) {}
private:
const AstRawString* name_;
@@ -2546,14 +2547,34 @@ class NativeFunctionLiteral V8_FINAL : public Expression {
};
-class ThisFunction V8_FINAL : public Expression {
+class ThisFunction FINAL : public Expression {
public:
DECLARE_NODE_TYPE(ThisFunction)
protected:
- explicit ThisFunction(Zone* zone, int pos): Expression(zone, pos) {}
+ ThisFunction(Zone* zone, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen) {}
};
+
+class SuperReference FINAL : public Expression {
+ public:
+ DECLARE_NODE_TYPE(SuperReference)
+
+ VariableProxy* this_var() const { return this_var_; }
+
+ TypeFeedbackId HomeObjectFeedbackId() { return reuse(id()); }
+
+ protected:
+ SuperReference(Zone* zone, VariableProxy* this_var, int pos, IdGen* id_gen)
+ : Expression(zone, pos, id_gen), this_var_(this_var) {
+ DCHECK(this_var->is_this());
+ }
+
+ VariableProxy* this_var_;
+};
+
+
#undef DECLARE_NODE_TYPE
@@ -2596,19 +2617,19 @@ class RegExpTree : public ZoneObject {
};
-class RegExpDisjunction V8_FINAL : public RegExpTree {
+class RegExpDisjunction FINAL : public RegExpTree {
public:
explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpDisjunction* AsDisjunction() V8_OVERRIDE;
- virtual Interval CaptureRegisters() V8_OVERRIDE;
- virtual bool IsDisjunction() V8_OVERRIDE;
- virtual bool IsAnchoredAtStart() V8_OVERRIDE;
- virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return min_match_; }
- virtual int max_match() V8_OVERRIDE { return max_match_; }
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpDisjunction* AsDisjunction() OVERRIDE;
+ virtual Interval CaptureRegisters() OVERRIDE;
+ virtual bool IsDisjunction() OVERRIDE;
+ virtual bool IsAnchoredAtStart() OVERRIDE;
+ virtual bool IsAnchoredAtEnd() OVERRIDE;
+ virtual int min_match() OVERRIDE { return min_match_; }
+ virtual int max_match() OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
private:
ZoneList<RegExpTree*>* alternatives_;
@@ -2617,19 +2638,19 @@ class RegExpDisjunction V8_FINAL : public RegExpTree {
};
-class RegExpAlternative V8_FINAL : public RegExpTree {
+class RegExpAlternative FINAL : public RegExpTree {
public:
explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpAlternative* AsAlternative() V8_OVERRIDE;
- virtual Interval CaptureRegisters() V8_OVERRIDE;
- virtual bool IsAlternative() V8_OVERRIDE;
- virtual bool IsAnchoredAtStart() V8_OVERRIDE;
- virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return min_match_; }
- virtual int max_match() V8_OVERRIDE { return max_match_; }
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpAlternative* AsAlternative() OVERRIDE;
+ virtual Interval CaptureRegisters() OVERRIDE;
+ virtual bool IsAlternative() OVERRIDE;
+ virtual bool IsAnchoredAtStart() OVERRIDE;
+ virtual bool IsAnchoredAtEnd() OVERRIDE;
+ virtual int min_match() OVERRIDE { return min_match_; }
+ virtual int max_match() OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* nodes() { return nodes_; }
private:
ZoneList<RegExpTree*>* nodes_;
@@ -2638,7 +2659,7 @@ class RegExpAlternative V8_FINAL : public RegExpTree {
};
-class RegExpAssertion V8_FINAL : public RegExpTree {
+class RegExpAssertion FINAL : public RegExpTree {
public:
enum AssertionType {
START_OF_LINE,
@@ -2649,22 +2670,22 @@ class RegExpAssertion V8_FINAL : public RegExpTree {
NON_BOUNDARY
};
explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpAssertion* AsAssertion() V8_OVERRIDE;
- virtual bool IsAssertion() V8_OVERRIDE;
- virtual bool IsAnchoredAtStart() V8_OVERRIDE;
- virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return 0; }
- virtual int max_match() V8_OVERRIDE { return 0; }
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpAssertion* AsAssertion() OVERRIDE;
+ virtual bool IsAssertion() OVERRIDE;
+ virtual bool IsAnchoredAtStart() OVERRIDE;
+ virtual bool IsAnchoredAtEnd() OVERRIDE;
+ virtual int min_match() OVERRIDE { return 0; }
+ virtual int max_match() OVERRIDE { return 0; }
AssertionType assertion_type() { return assertion_type_; }
private:
AssertionType assertion_type_;
};
-class CharacterSet V8_FINAL BASE_EMBEDDED {
+class CharacterSet FINAL BASE_EMBEDDED {
public:
explicit CharacterSet(uc16 standard_set_type)
: ranges_(NULL),
@@ -2687,7 +2708,7 @@ class CharacterSet V8_FINAL BASE_EMBEDDED {
};
-class RegExpCharacterClass V8_FINAL : public RegExpTree {
+class RegExpCharacterClass FINAL : public RegExpTree {
public:
RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
: set_(ranges),
@@ -2695,15 +2716,15 @@ class RegExpCharacterClass V8_FINAL : public RegExpTree {
explicit RegExpCharacterClass(uc16 type)
: set_(type),
is_negated_(false) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpCharacterClass* AsCharacterClass() V8_OVERRIDE;
- virtual bool IsCharacterClass() V8_OVERRIDE;
- virtual bool IsTextElement() V8_OVERRIDE { return true; }
- virtual int min_match() V8_OVERRIDE { return 1; }
- virtual int max_match() V8_OVERRIDE { return 1; }
- virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpCharacterClass* AsCharacterClass() OVERRIDE;
+ virtual bool IsCharacterClass() OVERRIDE;
+ virtual bool IsTextElement() OVERRIDE { return true; }
+ virtual int min_match() OVERRIDE { return 1; }
+ virtual int max_match() OVERRIDE { return 1; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version if is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
@@ -2729,18 +2750,18 @@ class RegExpCharacterClass V8_FINAL : public RegExpTree {
};
-class RegExpAtom V8_FINAL : public RegExpTree {
+class RegExpAtom FINAL : public RegExpTree {
public:
explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpAtom* AsAtom() V8_OVERRIDE;
- virtual bool IsAtom() V8_OVERRIDE;
- virtual bool IsTextElement() V8_OVERRIDE { return true; }
- virtual int min_match() V8_OVERRIDE { return data_.length(); }
- virtual int max_match() V8_OVERRIDE { return data_.length(); }
- virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpAtom* AsAtom() OVERRIDE;
+ virtual bool IsAtom() OVERRIDE;
+ virtual bool IsTextElement() OVERRIDE { return true; }
+ virtual int min_match() OVERRIDE { return data_.length(); }
+ virtual int max_match() OVERRIDE { return data_.length(); }
+ virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@@ -2748,18 +2769,18 @@ class RegExpAtom V8_FINAL : public RegExpTree {
};
-class RegExpText V8_FINAL : public RegExpTree {
+class RegExpText FINAL : public RegExpTree {
public:
explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpText* AsText() V8_OVERRIDE;
- virtual bool IsText() V8_OVERRIDE;
- virtual bool IsTextElement() V8_OVERRIDE { return true; }
- virtual int min_match() V8_OVERRIDE { return length_; }
- virtual int max_match() V8_OVERRIDE { return length_; }
- virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpText* AsText() OVERRIDE;
+ virtual bool IsText() OVERRIDE;
+ virtual bool IsTextElement() OVERRIDE { return true; }
+ virtual int min_match() OVERRIDE { return length_; }
+ virtual int max_match() OVERRIDE { return length_; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
void AddElement(TextElement elm, Zone* zone) {
elements_.Add(elm, zone);
length_ += elm.length();
@@ -2771,7 +2792,7 @@ class RegExpText V8_FINAL : public RegExpTree {
};
-class RegExpQuantifier V8_FINAL : public RegExpTree {
+class RegExpQuantifier FINAL : public RegExpTree {
public:
enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
@@ -2786,9 +2807,9 @@ class RegExpQuantifier V8_FINAL : public RegExpTree {
max_match_ = max * body->max_match();
}
}
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
+ RegExpNode* on_success) OVERRIDE;
static RegExpNode* ToNode(int min,
int max,
bool is_greedy,
@@ -2796,11 +2817,11 @@ class RegExpQuantifier V8_FINAL : public RegExpTree {
RegExpCompiler* compiler,
RegExpNode* on_success,
bool not_at_start = false);
- virtual RegExpQuantifier* AsQuantifier() V8_OVERRIDE;
- virtual Interval CaptureRegisters() V8_OVERRIDE;
- virtual bool IsQuantifier() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return min_match_; }
- virtual int max_match() V8_OVERRIDE { return max_match_; }
+ virtual RegExpQuantifier* AsQuantifier() OVERRIDE;
+ virtual Interval CaptureRegisters() OVERRIDE;
+ virtual bool IsQuantifier() OVERRIDE;
+ virtual int min_match() OVERRIDE { return min_match_; }
+ virtual int max_match() OVERRIDE { return max_match_; }
int min() { return min_; }
int max() { return max_; }
bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
@@ -2818,24 +2839,24 @@ class RegExpQuantifier V8_FINAL : public RegExpTree {
};
-class RegExpCapture V8_FINAL : public RegExpTree {
+class RegExpCapture FINAL : public RegExpTree {
public:
explicit RegExpCapture(RegExpTree* body, int index)
: body_(body), index_(index) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
+ RegExpNode* on_success) OVERRIDE;
static RegExpNode* ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
RegExpNode* on_success);
- virtual RegExpCapture* AsCapture() V8_OVERRIDE;
- virtual bool IsAnchoredAtStart() V8_OVERRIDE;
- virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
- virtual Interval CaptureRegisters() V8_OVERRIDE;
- virtual bool IsCapture() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return body_->min_match(); }
- virtual int max_match() V8_OVERRIDE { return body_->max_match(); }
+ virtual RegExpCapture* AsCapture() OVERRIDE;
+ virtual bool IsAnchoredAtStart() OVERRIDE;
+ virtual bool IsAnchoredAtEnd() OVERRIDE;
+ virtual Interval CaptureRegisters() OVERRIDE;
+ virtual bool IsCapture() OVERRIDE;
+ virtual int min_match() OVERRIDE { return body_->min_match(); }
+ virtual int max_match() OVERRIDE { return body_->max_match(); }
RegExpTree* body() { return body_; }
int index() { return index_; }
static int StartRegister(int index) { return index * 2; }
@@ -2847,7 +2868,7 @@ class RegExpCapture V8_FINAL : public RegExpTree {
};
-class RegExpLookahead V8_FINAL : public RegExpTree {
+class RegExpLookahead FINAL : public RegExpTree {
public:
RegExpLookahead(RegExpTree* body,
bool is_positive,
@@ -2858,15 +2879,15 @@ class RegExpLookahead V8_FINAL : public RegExpTree {
capture_count_(capture_count),
capture_from_(capture_from) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpLookahead* AsLookahead() V8_OVERRIDE;
- virtual Interval CaptureRegisters() V8_OVERRIDE;
- virtual bool IsLookahead() V8_OVERRIDE;
- virtual bool IsAnchoredAtStart() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return 0; }
- virtual int max_match() V8_OVERRIDE { return 0; }
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpLookahead* AsLookahead() OVERRIDE;
+ virtual Interval CaptureRegisters() OVERRIDE;
+ virtual bool IsLookahead() OVERRIDE;
+ virtual bool IsAnchoredAtStart() OVERRIDE;
+ virtual int min_match() OVERRIDE { return 0; }
+ virtual int max_match() OVERRIDE { return 0; }
RegExpTree* body() { return body_; }
bool is_positive() { return is_positive_; }
int capture_count() { return capture_count_; }
@@ -2880,17 +2901,17 @@ class RegExpLookahead V8_FINAL : public RegExpTree {
};
-class RegExpBackReference V8_FINAL : public RegExpTree {
+class RegExpBackReference FINAL : public RegExpTree {
public:
explicit RegExpBackReference(RegExpCapture* capture)
: capture_(capture) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpBackReference* AsBackReference() V8_OVERRIDE;
- virtual bool IsBackReference() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return 0; }
- virtual int max_match() V8_OVERRIDE { return capture_->max_match(); }
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpBackReference* AsBackReference() OVERRIDE;
+ virtual bool IsBackReference() OVERRIDE;
+ virtual int min_match() OVERRIDE { return 0; }
+ virtual int max_match() OVERRIDE { return capture_->max_match(); }
int index() { return capture_->index(); }
RegExpCapture* capture() { return capture_; }
private:
@@ -2898,16 +2919,16 @@ class RegExpBackReference V8_FINAL : public RegExpTree {
};
-class RegExpEmpty V8_FINAL : public RegExpTree {
+class RegExpEmpty FINAL : public RegExpTree {
public:
RegExpEmpty() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+ virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) V8_OVERRIDE;
- virtual RegExpEmpty* AsEmpty() V8_OVERRIDE;
- virtual bool IsEmpty() V8_OVERRIDE;
- virtual int min_match() V8_OVERRIDE { return 0; }
- virtual int max_match() V8_OVERRIDE { return 0; }
+ RegExpNode* on_success) OVERRIDE;
+ virtual RegExpEmpty* AsEmpty() OVERRIDE;
+ virtual bool IsEmpty() OVERRIDE;
+ virtual int min_match() OVERRIDE { return 0; }
+ virtual int max_match() OVERRIDE { return 0; }
static RegExpEmpty* GetInstance() {
static RegExpEmpty* instance = ::new RegExpEmpty();
return instance;
@@ -2951,7 +2972,7 @@ class AstVisitor BASE_EMBEDDED {
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
- virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
+ virtual void Visit(AstNode* node) FINAL OVERRIDE { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
@@ -2983,10 +3004,17 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
+ AstConstructionVisitor()
+ : dont_crankshaft_reason_(kNoReason), dont_turbofan_reason_(kNoReason) {}
AstProperties* ast_properties() { return &properties_; }
- BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ BailoutReason dont_optimize_reason() {
+ if (dont_turbofan_reason_ != kNoReason) {
+ return dont_turbofan_reason_;
+ } else {
+ return dont_crankshaft_reason_;
+ }
+ }
private:
template<class> friend class AstNodeFactory;
@@ -2999,8 +3027,11 @@ class AstConstructionVisitor BASE_EMBEDDED {
void increase_node_count() { properties_.add_node_count(1); }
void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
- void set_dont_optimize_reason(BailoutReason reason) {
- dont_optimize_reason_ = reason;
+ void set_dont_crankshaft_reason(BailoutReason reason) {
+ dont_crankshaft_reason_ = reason;
+ }
+ void set_dont_turbofan_reason(BailoutReason reason) {
+ dont_turbofan_reason_ = reason;
}
void add_slot_node(FeedbackSlotInterface* slot_node) {
@@ -3012,7 +3043,8 @@ class AstConstructionVisitor BASE_EMBEDDED {
}
AstProperties properties_;
- BailoutReason dont_optimize_reason_;
+ BailoutReason dont_crankshaft_reason_;
+ BailoutReason dont_turbofan_reason_;
};
@@ -3031,10 +3063,11 @@ class AstNullVisitor BASE_EMBEDDED {
// AstNode factory
template<class Visitor>
-class AstNodeFactory V8_FINAL BASE_EMBEDDED {
+class AstNodeFactory FINAL BASE_EMBEDDED {
public:
- explicit AstNodeFactory(Zone* zone, AstValueFactory* ast_value_factory)
- : zone_(zone), ast_value_factory_(ast_value_factory) {}
+ AstNodeFactory(Zone* zone, AstValueFactory* ast_value_factory,
+ AstNode::IdGen* id_gen)
+ : zone_(zone), ast_value_factory_(ast_value_factory), id_gen_(id_gen) {}
Visitor* visitor() { return &visitor_; }
@@ -3112,15 +3145,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int capacity,
bool is_initializer_block,
int pos) {
- Block* block = new(zone_) Block(
- zone_, labels, capacity, is_initializer_block, pos);
+ Block* block = new (zone_)
+ Block(zone_, labels, capacity, is_initializer_block, pos, id_gen_);
VISIT_AND_RETURN(Block, block)
}
-#define STATEMENT_WITH_LABELS(NodeType) \
+#define STATEMENT_WITH_LABELS(NodeType) \
NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \
- NodeType* stmt = new(zone_) NodeType(zone_, labels, pos); \
- VISIT_AND_RETURN(NodeType, stmt); \
+ NodeType* stmt = new (zone_) NodeType(zone_, labels, pos, id_gen_); \
+ VISIT_AND_RETURN(NodeType, stmt); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
@@ -3133,11 +3166,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- ForInStatement* stmt = new(zone_) ForInStatement(zone_, labels, pos);
+ ForInStatement* stmt =
+ new (zone_) ForInStatement(zone_, labels, pos, id_gen_);
VISIT_AND_RETURN(ForInStatement, stmt);
}
case ForEachStatement::ITERATE: {
- ForOfStatement* stmt = new(zone_) ForOfStatement(zone_, labels, pos);
+ ForOfStatement* stmt =
+ new (zone_) ForOfStatement(zone_, labels, pos, id_gen_);
VISIT_AND_RETURN(ForOfStatement, stmt);
}
}
@@ -3185,8 +3220,8 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Statement* then_statement,
Statement* else_statement,
int pos) {
- IfStatement* stmt = new(zone_) IfStatement(
- zone_, condition, then_statement, else_statement, pos);
+ IfStatement* stmt = new (zone_) IfStatement(
+ zone_, condition, then_statement, else_statement, pos, id_gen_);
VISIT_AND_RETURN(IfStatement, stmt)
}
@@ -3211,7 +3246,8 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
}
DebuggerStatement* NewDebuggerStatement(int pos) {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement(zone_, pos);
+ DebuggerStatement* stmt =
+ new (zone_) DebuggerStatement(zone_, pos, id_gen_);
VISIT_AND_RETURN(DebuggerStatement, stmt)
}
@@ -3222,63 +3258,63 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
CaseClause* NewCaseClause(
Expression* label, ZoneList<Statement*>* statements, int pos) {
CaseClause* clause =
- new(zone_) CaseClause(zone_, label, statements, pos);
+ new (zone_) CaseClause(zone_, label, statements, pos, id_gen_);
VISIT_AND_RETURN(CaseClause, clause)
}
Literal* NewStringLiteral(const AstRawString* string, int pos) {
- Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewString(string), pos);
+ Literal* lit = new (zone_)
+ Literal(zone_, ast_value_factory_->NewString(string), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
// A JavaScript symbol (ECMA-262 edition 6).
Literal* NewSymbolLiteral(const char* name, int pos) {
- Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewSymbol(name), pos);
+ Literal* lit = new (zone_)
+ Literal(zone_, ast_value_factory_->NewSymbol(name), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewNumberLiteral(double number, int pos) {
Literal* lit = new (zone_)
- Literal(zone_, ast_value_factory_->NewNumber(number), pos);
+ Literal(zone_, ast_value_factory_->NewNumber(number), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewSmiLiteral(int number, int pos) {
- Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewSmi(number), pos);
+ Literal* lit = new (zone_)
+ Literal(zone_, ast_value_factory_->NewSmi(number), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewBooleanLiteral(bool b, int pos) {
- Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewBoolean(b), pos);
+ Literal* lit = new (zone_)
+ Literal(zone_, ast_value_factory_->NewBoolean(b), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewStringListLiteral(ZoneList<const AstRawString*>* strings,
int pos) {
- Literal* lit = new (zone_)
- Literal(zone_, ast_value_factory_->NewStringList(strings), pos);
+ Literal* lit = new (zone_) Literal(
+ zone_, ast_value_factory_->NewStringList(strings), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewNullLiteral(int pos) {
Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos);
+ new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewUndefinedLiteral(int pos) {
- Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewUndefined(), pos);
+ Literal* lit = new (zone_)
+ Literal(zone_, ast_value_factory_->NewUndefined(), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewTheHoleLiteral(int pos) {
- Literal* lit =
- new (zone_) Literal(zone_, ast_value_factory_->NewTheHole(), pos);
+ Literal* lit = new (zone_)
+ Literal(zone_, ast_value_factory_->NewTheHole(), pos, id_gen_);
VISIT_AND_RETURN(Literal, lit)
}
@@ -3288,23 +3324,24 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int boilerplate_properties,
bool has_function,
int pos) {
- ObjectLiteral* lit = new(zone_) ObjectLiteral(
- zone_, properties, literal_index, boilerplate_properties,
- has_function, pos);
+ ObjectLiteral* lit = new (zone_)
+ ObjectLiteral(zone_, properties, literal_index, boilerplate_properties,
+ has_function, pos, id_gen_);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key,
- Expression* value) {
- return new (zone_)
- ObjectLiteral::Property(zone_, ast_value_factory_, key, value);
+ Expression* value,
+ bool is_static) {
+ return new (zone_) ObjectLiteral::Property(zone_, ast_value_factory_, key,
+ value, is_static);
}
ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
FunctionLiteral* value,
- int pos) {
+ int pos, bool is_static) {
ObjectLiteral::Property* prop =
- new(zone_) ObjectLiteral::Property(zone_, is_getter, value);
+ new (zone_) ObjectLiteral::Property(zone_, is_getter, value, is_static);
prop->set_key(NewStringLiteral(value->raw_name(), pos));
return prop; // Not an AST node, will not be visited.
}
@@ -3313,22 +3350,22 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
const AstRawString* flags,
int literal_index,
int pos) {
- RegExpLiteral* lit =
- new(zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
+ RegExpLiteral* lit = new (zone_)
+ RegExpLiteral(zone_, pattern, flags, literal_index, pos, id_gen_);
VISIT_AND_RETURN(RegExpLiteral, lit);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
int pos) {
- ArrayLiteral* lit = new(zone_) ArrayLiteral(
- zone_, values, literal_index, pos);
+ ArrayLiteral* lit =
+ new (zone_) ArrayLiteral(zone_, values, literal_index, pos, id_gen_);
VISIT_AND_RETURN(ArrayLiteral, lit)
}
VariableProxy* NewVariableProxy(Variable* var,
int pos = RelocInfo::kNoPosition) {
- VariableProxy* proxy = new(zone_) VariableProxy(zone_, var, pos);
+ VariableProxy* proxy = new (zone_) VariableProxy(zone_, var, pos, id_gen_);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -3336,27 +3373,28 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
bool is_this,
Interface* interface = Interface::NewValue(),
int position = RelocInfo::kNoPosition) {
- VariableProxy* proxy =
- new(zone_) VariableProxy(zone_, name, is_this, interface, position);
+ VariableProxy* proxy = new (zone_)
+ VariableProxy(zone_, name, is_this, interface, position, id_gen_);
VISIT_AND_RETURN(VariableProxy, proxy)
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {
- Property* prop = new(zone_) Property(zone_, obj, key, pos);
+ Property* prop = new (zone_) Property(zone_, obj, key, pos, id_gen_);
VISIT_AND_RETURN(Property, prop)
}
Call* NewCall(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- Call* call = new(zone_) Call(zone_, expression, arguments, pos);
+ Call* call = new (zone_) Call(zone_, expression, arguments, pos, id_gen_);
VISIT_AND_RETURN(Call, call)
}
CallNew* NewCallNew(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- CallNew* call = new(zone_) CallNew(zone_, expression, arguments, pos);
+ CallNew* call =
+ new (zone_) CallNew(zone_, expression, arguments, pos, id_gen_);
VISIT_AND_RETURN(CallNew, call)
}
@@ -3365,7 +3403,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
ZoneList<Expression*>* arguments,
int pos) {
CallRuntime* call =
- new(zone_) CallRuntime(zone_, name, function, arguments, pos);
+ new (zone_) CallRuntime(zone_, name, function, arguments, pos, id_gen_);
VISIT_AND_RETURN(CallRuntime, call)
}
@@ -3373,7 +3411,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* expression,
int pos) {
UnaryOperation* node =
- new(zone_) UnaryOperation(zone_, op, expression, pos);
+ new (zone_) UnaryOperation(zone_, op, expression, pos, id_gen_);
VISIT_AND_RETURN(UnaryOperation, node)
}
@@ -3382,7 +3420,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* right,
int pos) {
BinaryOperation* node =
- new(zone_) BinaryOperation(zone_, op, left, right, pos);
+ new (zone_) BinaryOperation(zone_, op, left, right, pos, id_gen_);
VISIT_AND_RETURN(BinaryOperation, node)
}
@@ -3391,7 +3429,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* expr,
int pos) {
CountOperation* node =
- new(zone_) CountOperation(zone_, op, is_prefix, expr, pos);
+ new (zone_) CountOperation(zone_, op, is_prefix, expr, pos, id_gen_);
VISIT_AND_RETURN(CountOperation, node)
}
@@ -3400,7 +3438,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* right,
int pos) {
CompareOperation* node =
- new(zone_) CompareOperation(zone_, op, left, right, pos);
+ new (zone_) CompareOperation(zone_, op, left, right, pos, id_gen_);
VISIT_AND_RETURN(CompareOperation, node)
}
@@ -3408,8 +3446,8 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* then_expression,
Expression* else_expression,
int position) {
- Conditional* cond = new(zone_) Conditional(
- zone_, condition, then_expression, else_expression, position);
+ Conditional* cond = new (zone_) Conditional(
+ zone_, condition, then_expression, else_expression, position, id_gen_);
VISIT_AND_RETURN(Conditional, cond)
}
@@ -3418,7 +3456,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* value,
int pos) {
Assignment* assign =
- new(zone_) Assignment(zone_, op, target, value, pos);
+ new (zone_) Assignment(zone_, op, target, value, pos, id_gen_);
assign->Init(zone_, this);
VISIT_AND_RETURN(Assignment, assign)
}
@@ -3428,13 +3466,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Yield::Kind yield_kind,
int pos) {
if (!expression) expression = NewUndefinedLiteral(pos);
- Yield* yield = new(zone_) Yield(
- zone_, generator_object, expression, yield_kind, pos);
+ Yield* yield = new (zone_)
+ Yield(zone_, generator_object, expression, yield_kind, pos, id_gen_);
VISIT_AND_RETURN(Yield, yield)
}
Throw* NewThrow(Expression* exception, int pos) {
- Throw* t = new(zone_) Throw(zone_, exception, pos);
+ Throw* t = new (zone_) Throw(zone_, exception, pos, id_gen_);
VISIT_AND_RETURN(Throw, t)
}
@@ -3445,13 +3483,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
- FunctionLiteral::IsParenthesizedFlag is_parenthesized,
- FunctionLiteral::KindFlag kind, int position) {
+ FunctionLiteral::IsParenthesizedFlag is_parenthesized, FunctionKind kind,
+ int position) {
FunctionLiteral* lit = new (zone_) FunctionLiteral(
zone_, name, ast_value_factory, scope, body, materialized_literal_count,
expected_property_count, handler_count, parameter_count, function_type,
- has_duplicate_parameters, is_function, is_parenthesized, kind,
- position);
+ has_duplicate_parameters, is_function, is_parenthesized, kind, position,
+ id_gen_);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit);
@@ -3459,25 +3497,41 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
return lit;
}
- NativeFunctionLiteral* NewNativeFunctionLiteral(
- const AstRawString* name, v8::Extension* extension,
- int pos) {
+ ClassLiteral* NewClassLiteral(const AstRawString* name, Expression* extends,
+ Expression* constructor,
+ ZoneList<ObjectLiteral::Property*>* properties,
+ int position) {
+ ClassLiteral* lit = new (zone_) ClassLiteral(
+ zone_, name, extends, constructor, properties, position, id_gen_);
+ VISIT_AND_RETURN(ClassLiteral, lit)
+ }
+
+ NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
+ v8::Extension* extension,
+ int pos) {
NativeFunctionLiteral* lit =
- new(zone_) NativeFunctionLiteral(zone_, name, extension, pos);
+ new (zone_) NativeFunctionLiteral(zone_, name, extension, pos, id_gen_);
VISIT_AND_RETURN(NativeFunctionLiteral, lit)
}
ThisFunction* NewThisFunction(int pos) {
- ThisFunction* fun = new(zone_) ThisFunction(zone_, pos);
+ ThisFunction* fun = new (zone_) ThisFunction(zone_, pos, id_gen_);
VISIT_AND_RETURN(ThisFunction, fun)
}
+ SuperReference* NewSuperReference(VariableProxy* this_var, int pos) {
+ SuperReference* super =
+ new (zone_) SuperReference(zone_, this_var, pos, id_gen_);
+ VISIT_AND_RETURN(SuperReference, super);
+ }
+
#undef VISIT_AND_RETURN
private:
Zone* zone_;
Visitor visitor_;
AstValueFactory* ast_value_factory_;
+ AstNode::IdGen* id_gen_;
};
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
new file mode 100644
index 0000000000..c7602a7def
--- /dev/null
+++ b/deps/v8/src/background-parsing-task.cc
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/background-parsing-task.h"
+
+namespace v8 {
+namespace internal {
+
+BackgroundParsingTask::BackgroundParsingTask(
+ StreamedSource* source, ScriptCompiler::CompileOptions options,
+ int stack_size, Isolate* isolate)
+ : source_(source), options_(options), stack_size_(stack_size) {
+ // Prepare the data for the internalization phase and compilation phase, which
+ // will happen in the main thread after parsing.
+ source->info.Reset(new i::CompilationInfoWithZone(source->source_stream.get(),
+ source->encoding, isolate));
+ source->info->MarkAsGlobal();
+
+ // We don't set the context to the CompilationInfo yet, because the background
+ // thread cannot do anything with it anyway. We set it just before compilation
+ // on the foreground thread.
+ DCHECK(options == ScriptCompiler::kProduceParserCache ||
+ options == ScriptCompiler::kProduceCodeCache ||
+ options == ScriptCompiler::kNoCompileOptions);
+ source->allow_lazy =
+ !i::Compiler::DebuggerWantsEagerCompilation(source->info.get());
+ source->hash_seed = isolate->heap()->HashSeed();
+}
+
+
+void BackgroundParsingTask::Run() {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
+ ScriptData* script_data = NULL;
+ if (options_ == ScriptCompiler::kProduceParserCache ||
+ options_ == ScriptCompiler::kProduceCodeCache) {
+ source_->info->SetCachedData(&script_data, options_);
+ }
+
+ uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - stack_size_ * KB;
+ Parser::ParseInfo parse_info = {limit, source_->hash_seed,
+ &source_->unicode_cache};
+
+ // Parser needs to stay alive for finalizing the parsing on the main
+ // thread. Passing &parse_info is OK because Parser doesn't store it.
+ source_->parser.Reset(new Parser(source_->info.get(), &parse_info));
+ source_->parser->set_allow_lazy(source_->allow_lazy);
+ source_->parser->ParseOnBackground();
+
+ if (script_data != NULL) {
+ source_->cached_data.Reset(new ScriptCompiler::CachedData(
+ script_data->data(), script_data->length(),
+ ScriptCompiler::CachedData::BufferOwned));
+ script_data->ReleaseDataOwnership();
+ delete script_data;
+ }
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
new file mode 100644
index 0000000000..19c93a833a
--- /dev/null
+++ b/deps/v8/src/background-parsing-task.h
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BACKGROUND_PARSING_TASK_H_
+#define V8_BACKGROUND_PARSING_TASK_H_
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/compiler.h"
+#include "src/parser.h"
+#include "src/smart-pointers.h"
+
+namespace v8 {
+namespace internal {
+
+class Parser;
+
+// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
+// data which needs to be transmitted between threads for background parsing,
+// finalizing it on the main thread, and compiling on the main thread.
+struct StreamedSource {
+ StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding)
+ : source_stream(source_stream),
+ encoding(encoding),
+ hash_seed(0),
+ allow_lazy(false) {}
+
+ // Internal implementation of v8::ScriptCompiler::StreamedSource.
+ SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
+ ScriptCompiler::StreamedSource::Encoding encoding;
+ SmartPointer<ScriptCompiler::CachedData> cached_data;
+
+ // Data needed for parsing, and data needed to to be passed between thread
+ // between parsing and compilation. These need to be initialized before the
+ // compilation starts.
+ UnicodeCache unicode_cache;
+ SmartPointer<CompilationInfo> info;
+ uint32_t hash_seed;
+ bool allow_lazy;
+ SmartPointer<Parser> parser;
+
+ private:
+ // Prevent copying. Not implemented.
+ StreamedSource(const StreamedSource&);
+ StreamedSource& operator=(const StreamedSource&);
+};
+
+
+class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
+ public:
+ BackgroundParsingTask(StreamedSource* source,
+ ScriptCompiler::CompileOptions options, int stack_size,
+ Isolate* isolate);
+
+ virtual void Run();
+
+ private:
+ StreamedSource* source_; // Not owned.
+ ScriptCompiler::CompileOptions options_;
+ int stack_size_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_BACKGROUND_PARSING_TASK_H_
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/bailout-reason.cc
new file mode 100644
index 0000000000..93d43dde26
--- /dev/null
+++ b/deps/v8/src/bailout-reason.cc
@@ -0,0 +1,20 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bailout-reason.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+const char* GetBailoutReason(BailoutReason reason) {
+ DCHECK(reason < kLastErrorMessage);
+#define ERROR_MESSAGES_TEXTS(C, T) T,
+ static const char* error_messages_[] = {
+ ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
+#undef ERROR_MESSAGES_TEXTS
+ return error_messages_[reason];
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
new file mode 100644
index 0000000000..7287d629d2
--- /dev/null
+++ b/deps/v8/src/bailout-reason.h
@@ -0,0 +1,339 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BAILOUT_REASON_H_
+#define V8_BAILOUT_REASON_H_
+
+namespace v8 {
+namespace internal {
+
+#define ERROR_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(k32BitValueInRegisterIsNotZeroExtended, \
+ "32 bit value in register is not zero-extended") \
+ V(kAlignmentMarkerExpected, "Alignment marker expected") \
+ V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
+ V(kArgumentsObjectValueInATestContext, \
+ "Arguments object value in a test context") \
+ V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed") \
+ V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
+ V(kAssignmentToArguments, "Assignment to arguments") \
+ V(kAssignmentToLetVariableBeforeInitialization, \
+ "Assignment to let variable before initialization") \
+ V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
+ V(kAssignmentToParameterFunctionUsesArgumentsObject, \
+ "Assignment to parameter, function uses arguments object") \
+ V(kAssignmentToParameterInArgumentsObject, \
+ "Assignment to parameter in arguments object") \
+ V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
+ V(kBadValueContextForArgumentsObjectValue, \
+ "Bad value context for arguments object value") \
+ V(kBadValueContextForArgumentsValue, \
+ "Bad value context for arguments value") \
+ V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
+ V(kBailoutWasNotPrepared, "Bailout was not prepared") \
+ V(kBinaryStubGenerateFloatingPointCode, \
+ "BinaryStub_GenerateFloatingPointCode") \
+ V(kBothRegistersWereSmisInSelectNonSmi, \
+ "Both registers were smis in SelectNonSmi") \
+ V(kCallToAJavaScriptRuntimeFunction, \
+ "Call to a JavaScript runtime function") \
+ V(kCannotTranslatePositionInChangedArea, \
+ "Cannot translate position in changed area") \
+ V(kClassLiteral, "Class literal") \
+ V(kCodeGenerationFailed, "Code generation failed") \
+ V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
+ V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
+ V(kContextAllocatedArguments, "Context-allocated arguments") \
+ V(kCopyBuffersOverlap, "Copy buffers overlap") \
+ V(kCouldNotGenerateZero, "Could not generate +0.0") \
+ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
+ V(kDebuggerHasBreakPoints, "Debugger has break points") \
+ V(kDebuggerStatement, "DebuggerStatement") \
+ V(kDeclarationInCatchContext, "Declaration in catch context") \
+ V(kDeclarationInWithContext, "Declaration in with context") \
+ V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
+ V(kDeleteWithGlobalVariable, "Delete with global variable") \
+ V(kDeleteWithNonGlobalVariable, "Delete with non-global variable") \
+ V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
+ V(kDontDeleteCellsCannotContainTheHole, \
+ "DontDelete cells can't contain the hole") \
+ V(kDoPushArgumentNotImplementedForDoubleType, \
+ "DoPushArgument not implemented for double type") \
+ V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
+ V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
+ "EmitLoadRegister: Unsupported double immediate") \
+ V(kEval, "eval") \
+ V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
+ V(kExpectedAlignmentMarker, "Expected alignment marker") \
+ V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedFunctionObject, "Expected function object in register") \
+ V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedNativeContext, "Expected native context") \
+ V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
+ V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedPositiveZero, "Expected +0.0") \
+ V(kExpectedAllocationSiteInCell, "Expected AllocationSite in property cell") \
+ V(kExpectedFixedArrayInFeedbackVector, \
+ "Expected fixed array in feedback vector") \
+ V(kExpectedFixedArrayInRegisterA2, "Expected fixed array in register a2") \
+ V(kExpectedFixedArrayInRegisterEbx, "Expected fixed array in register ebx") \
+ V(kExpectedFixedArrayInRegisterR2, "Expected fixed array in register r2") \
+ V(kExpectedFixedArrayInRegisterRbx, "Expected fixed array in register rbx") \
+ V(kExpectedNewSpaceObject, "Expected new space object") \
+ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
+ V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
+ V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes") \
+ V(kExportDeclaration, "Export declaration") \
+ V(kExternalStringExpectedButNotFound, \
+ "External string expected, but not found") \
+ V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
+ V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
+ V(kForInStatementOptimizationIsDisabled, \
+ "ForInStatement optimization is disabled") \
+ V(kForInStatementWithNonLocalEachVariable, \
+ "ForInStatement with non-local each variable") \
+ V(kForOfStatement, "ForOfStatement") \
+ V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
+ V(kFunctionCallsEval, "Function calls eval") \
+ V(kFunctionIsAGenerator, "Function is a generator") \
+ V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
+ V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
+ V(kGeneratorFailedToResume, "Generator failed to resume") \
+ V(kGenerator, "Generator") \
+ V(kGlobalFunctionsMustHaveInitialMap, \
+ "Global functions must have initial map") \
+ V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
+ V(kHydrogenFilter, "Optimization disabled by filter") \
+ V(kImportDeclaration, "Import declaration") \
+ V(kImproperObjectOnPrototypeChainForStore, \
+ "Improper object on prototype chain for store") \
+ V(kIndexIsNegative, "Index is negative") \
+ V(kIndexIsTooLarge, "Index is too large") \
+ V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
+ V(kInlinedRuntimeFunctionFastOneByteArrayJoin, \
+ "Inlined runtime function: FastOneByteArrayJoin") \
+ V(kInlinedRuntimeFunctionGeneratorNext, \
+ "Inlined runtime function: GeneratorNext") \
+ V(kInlinedRuntimeFunctionGeneratorThrow, \
+ "Inlined runtime function: GeneratorThrow") \
+ V(kInlinedRuntimeFunctionGetFromCache, \
+ "Inlined runtime function: GetFromCache") \
+ V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
+ "Inlined runtime function: IsNonNegativeSmi") \
+ V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
+ "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
+ V(kInliningBailedOut, "Inlining bailed out") \
+ V(kInputGPRIsExpectedToHaveUpper32Cleared, \
+ "Input GPR is expected to have upper32 cleared") \
+ V(kInputStringTooLong, "Input string too long") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
+ "InstanceofStub unexpected call site cache (check)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
+ "InstanceofStub unexpected call site cache (cmp 1)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
+ "InstanceofStub unexpected call site cache (cmp 2)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheMov, \
+ "InstanceofStub unexpected call site cache (mov)") \
+ V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
+ "Integer32ToSmiField writing to non-smi location") \
+ V(kInvalidCaptureReferenced, "Invalid capture referenced") \
+ V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
+ "Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFullCodegenState, "invalid full-codegen state") \
+ V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
+ V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
+ V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
+ V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
+ V(kInvalidMinLength, "Invalid min_length") \
+ V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
+ "JSGlobalObject::native_context should be a native context") \
+ V(kJSGlobalProxyContextShouldNotBeNull, \
+ "JSGlobalProxy::context() should not be null") \
+ V(kJSObjectWithFastElementsMapHasSlowElements, \
+ "JSObject with fast elements map has slow elements") \
+ V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLhsHasBeenClobbered, "lhs has been clobbered") \
+ V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEdit, "LiveEdit") \
+ V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
+ V(kMapBecameDeprecated, "Map became deprecated") \
+ V(kMapBecameUnstable, "Map became unstable") \
+ V(kMapIsNoLongerInEax, "Map is no longer in eax") \
+ V(kModuleDeclaration, "Module declaration") \
+ V(kModuleLiteral, "Module literal") \
+ V(kModulePath, "Module path") \
+ V(kModuleStatement, "Module statement") \
+ V(kModuleVariable, "Module variable") \
+ V(kModuleUrl, "Module url") \
+ V(kNativeFunctionLiteral, "Native function literal") \
+ V(kSuperReference, "Super reference") \
+ V(kNeedSmiLiteral, "Need a Smi literal here") \
+ V(kNoCasesLeft, "No cases left") \
+ V(kNoEmptyArraysHereInEmitFastOneByteArrayJoin, \
+ "No empty arrays here in EmitFastOneByteArrayJoin") \
+ V(kNonInitializerAssignmentToConst, "Non-initializer assignment to const") \
+ V(kNonSmiIndex, "Non-smi index") \
+ V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
+ V(kNonSmiValue, "Non-smi value") \
+ V(kNonObject, "Non-object value") \
+ V(kNotEnoughVirtualRegistersForValues, \
+ "Not enough virtual registers for values") \
+ V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR") \
+ V(kNotEnoughVirtualRegistersRegalloc, \
+ "Not enough virtual registers (regalloc)") \
+ V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
+ V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
+ V(kOddballInStringTableIsNotUndefinedOrTheHole, \
+ "Oddball in string table is not undefined or the hole") \
+ V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
+ V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
+ V(kOperandIsASmi, "Operand is a smi") \
+ V(kOperandIsNotAName, "Operand is not a name") \
+ V(kOperandIsNotANumber, "Operand is not a number") \
+ V(kOperandIsNotASmi, "Operand is not a smi") \
+ V(kOperandIsNotAString, "Operand is not a string") \
+ V(kOperandIsNotSmi, "Operand is not smi") \
+ V(kOperandNotANumber, "Operand not a number") \
+ V(kObjectTagged, "The object is tagged") \
+ V(kObjectNotTagged, "The object is not tagged") \
+ V(kOptimizationDisabled, "Optimization is disabled") \
+ V(kOptimizedTooManyTimes, "Optimized too many times") \
+ V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
+ "Out of virtual registers while trying to allocate temp register") \
+ V(kParseScopeError, "Parse/scope error") \
+ V(kPossibleDirectCallToEval, "Possible direct call to eval") \
+ V(kPreconditionsWereNotMet, "Preconditions were not met") \
+ V(kPropertyAllocationCountFailed, "Property allocation count failed") \
+ V(kReceivedInvalidReturnAddress, "Received invalid return address") \
+ V(kReferenceToAVariableWhichRequiresDynamicLookup, \
+ "Reference to a variable which requires dynamic lookup") \
+ V(kReferenceToGlobalLexicalVariable, "Reference to global lexical variable") \
+ V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
+ V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
+ V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
+ V(kScopedBlock, "ScopedBlock") \
+ V(kSmiAdditionOverflow, "Smi addition overflow") \
+ V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
+ V(kStackFrameTypesMustMatch, "Stack frame types must match") \
+ V(kTheCurrentStackPointerIsBelowCsp, \
+ "The current stack pointer is below csp") \
+ V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
+ V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
+ V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
+ "The instruction to patch should be a load from the constant pool") \
+ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
+ "The instruction to patch should be a ldr literal") \
+ V(kTheInstructionToPatchShouldBeALui, \
+ "The instruction to patch should be a lui") \
+ V(kTheInstructionToPatchShouldBeAnOri, \
+ "The instruction to patch should be an ori") \
+ V(kTheSourceAndDestinationAreTheSame, \
+ "The source and destination are the same") \
+ V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
+ V(kTheStackWasCorruptedByMacroAssemblerCall, \
+ "The stack was corrupted by MacroAssembler::Call()") \
+ V(kTooManyParametersLocals, "Too many parameters/locals") \
+ V(kTooManyParameters, "Too many parameters") \
+ V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
+ V(kToOperandIsDoubleRegisterUnimplemented, \
+ "ToOperand IsDoubleRegister unimplemented") \
+ V(kToOperandUnsupportedDoubleImmediate, \
+ "ToOperand Unsupported double immediate") \
+ V(kTryCatchStatement, "TryCatchStatement") \
+ V(kTryFinallyStatement, "TryFinallyStatement") \
+ V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
+ V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
+ V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
+ V(kUndoAllocationOfNonAllocatedMemory, \
+ "Undo allocation of non allocated memory") \
+ V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
+ V(kUnexpectedElementsKindInArrayConstructor, \
+ "Unexpected ElementsKind in array constructor") \
+ V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
+ "Unexpected fallthrough from CharCodeAt slow case") \
+ V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
+ "Unexpected fallthrough from CharFromCode slow case") \
+ V(kUnexpectedFallThroughFromStringComparison, \
+ "Unexpected fall-through from string comparison") \
+ V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode, \
+ "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode") \
+ V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
+ "Unexpected fallthrough to CharCodeAt slow case") \
+ V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
+ "Unexpected fallthrough to CharFromCode slow case") \
+ V(kUnexpectedFPUStackDepthAfterInstruction, \
+ "Unexpected FPU stack depth after instruction") \
+ V(kUnexpectedInitialMapForArrayFunction1, \
+ "Unexpected initial map for Array function (1)") \
+ V(kUnexpectedInitialMapForArrayFunction2, \
+ "Unexpected initial map for Array function (2)") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
+ V(kUnexpectedInitialMapForInternalArrayFunction, \
+ "Unexpected initial map for InternalArray function") \
+ V(kUnexpectedLevelAfterReturnFromApiCall, \
+ "Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
+ V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
+ "Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
+ V(kUnexpectedSmi, "Unexpected smi value") \
+ V(kUnexpectedStringFunction, "Unexpected String function") \
+ V(kUnexpectedStringType, "Unexpected string type") \
+ V(kUnexpectedStringWrapperInstanceSize, \
+ "Unexpected string wrapper instance size") \
+ V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
+ "Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedValue, "Unexpected value") \
+ V(kUnexpectedUnusedPropertiesOfStringWrapper, \
+ "Unexpected unused properties of string wrapper") \
+ V(kUnimplemented, "unimplemented") \
+ V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
+ V(kUnknown, "Unknown") \
+ V(kUnsupportedConstCompoundAssignment, \
+ "Unsupported const compound assignment") \
+ V(kUnsupportedCountOperationWithConst, \
+ "Unsupported count operation with const") \
+ V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
+ V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
+ V(kUnsupportedLookupSlotInDeclaration, \
+ "Unsupported lookup slot in declaration") \
+ V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
+ V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
+ V(kUnsupportedPhiUseOfConstVariable, \
+ "Unsupported phi use of const variable") \
+ V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
+ V(kVariableResolvedToWithContext, "Variable resolved to with context") \
+ V(kWeShouldNotHaveAnEmptyLexicalContext, \
+ "We should not have an empty lexical context") \
+ V(kWithStatement, "WithStatement") \
+ V(kWrongFunctionContext, "Wrong context passed to function") \
+ V(kWrongAddressOrValuePassedToRecordWrite, \
+ "Wrong address or value passed to RecordWrite") \
+ V(kYield, "Yield")
+
+
+#define ERROR_MESSAGES_CONSTANTS(C, T) C,
+enum BailoutReason {
+ ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
+};
+#undef ERROR_MESSAGES_CONSTANTS
+
+
+const char* GetBailoutReason(BailoutReason reason);
+}
+} // namespace v8::internal
+
+#endif // V8_BAILOUT_REASON_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips_gcc.h b/deps/v8/src/base/atomicops_internals_mips_gcc.h
index 0d3a0e38c1..d33b66876b 100644
--- a/deps/v8/src/base/atomicops_internals_mips_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_mips_gcc.h
@@ -27,16 +27,16 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
- "ll %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "sc %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
+ "ll %0, 0(%4)\n" // prev = *ptr
+ "bne %0, %2, 2f\n" // if (prev != old_value) goto 2
+ "move %1, %3\n" // tmp = new_value
+ "sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "=&r" (prev), "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value), "r" (ptr)
: "memory");
return prev;
}
@@ -48,15 +48,16 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
+ ".set at\n"
"1:\n"
- "ll %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "ll %1, 0(%3)\n" // old = *ptr
+ "move %0, %2\n" // temp = new_value
+ "sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
+ : "=&r" (temp), "=&r" (old)
+ : "r" (new_value), "r" (ptr)
: "memory");
return old;
@@ -71,14 +72,14 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %1, %0, %3\n" // temp2 = temp + increment
- "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "ll %0, 0(%3)\n" // temp = *ptr
+ "addu %1, %0, %2\n" // temp2 = temp + increment
+ "sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %3\n" // temp2 = temp + increment
+ "addu %1, %0, %2\n" // temp2 = temp + increment
".set pop\n"
- : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
+ : "=&r" (temp), "=&r" (temp2)
+ : "Ir" (increment), "r" (ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
diff --git a/deps/v8/test/base-unittests/base-unittests.gyp b/deps/v8/src/base/base.gyp
index 339269db1b..e391e2e8d1 100644
--- a/deps/v8/test/base-unittests/base-unittests.gyp
+++ b/deps/v8/src/base/base.gyp
@@ -20,11 +20,16 @@
'../..',
],
'sources': [ ### gcmole(all) ###
+ 'bits-unittest.cc',
'cpu-unittest.cc',
+ 'division-by-constant-unittest.cc',
+ 'flags-unittest.cc',
'platform/condition-variable-unittest.cc',
'platform/mutex-unittest.cc',
'platform/platform-unittest.cc',
+ 'platform/semaphore-unittest.cc',
'platform/time-unittest.cc',
+ 'sys-info-unittest.cc',
'utils/random-number-generator-unittest.cc',
],
'conditions': [
diff --git a/deps/v8/src/base/bits-unittest.cc b/deps/v8/src/base/bits-unittest.cc
new file mode 100644
index 0000000000..06c1183586
--- /dev/null
+++ b/deps/v8/src/base/bits-unittest.cc
@@ -0,0 +1,167 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "testing/gtest-support.h"
+
+#ifdef DEBUG
+#define DISABLE_IN_RELEASE(Name) Name
+#else
+#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+TEST(Bits, CountPopulation32) {
+ EXPECT_EQ(0u, CountPopulation32(0));
+ EXPECT_EQ(1u, CountPopulation32(1));
+ EXPECT_EQ(8u, CountPopulation32(0x11111111));
+ EXPECT_EQ(16u, CountPopulation32(0xf0f0f0f0));
+ EXPECT_EQ(24u, CountPopulation32(0xfff0f0ff));
+ EXPECT_EQ(32u, CountPopulation32(0xffffffff));
+}
+
+
+TEST(Bits, CountLeadingZeros32) {
+ EXPECT_EQ(32u, CountLeadingZeros32(0));
+ EXPECT_EQ(31u, CountLeadingZeros32(1));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(31u - shift, CountLeadingZeros32(1u << shift));
+ }
+ EXPECT_EQ(4u, CountLeadingZeros32(0x0f0f0f0f));
+}
+
+
+TEST(Bits, CountTrailingZeros32) {
+ EXPECT_EQ(32u, CountTrailingZeros32(0));
+ EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(shift, CountTrailingZeros32(1u << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeros32(0xf0f0f0f0));
+}
+
+
+TEST(Bits, IsPowerOfTwo32) {
+ EXPECT_FALSE(IsPowerOfTwo32(0U));
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_TRUE(IsPowerOfTwo32(1U << shift));
+ EXPECT_FALSE(IsPowerOfTwo32((1U << shift) + 5U));
+ EXPECT_FALSE(IsPowerOfTwo32(~(1U << shift)));
+ }
+ TRACED_FORRANGE(uint32_t, shift, 2, 31) {
+ EXPECT_FALSE(IsPowerOfTwo32((1U << shift) - 1U));
+ }
+ EXPECT_FALSE(IsPowerOfTwo32(0xffffffff));
+}
+
+
+TEST(Bits, IsPowerOfTwo64) {
+ EXPECT_FALSE(IsPowerOfTwo64(0U));
+ TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+ EXPECT_TRUE(IsPowerOfTwo64(V8_UINT64_C(1) << shift));
+ EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) + 5U));
+ EXPECT_FALSE(IsPowerOfTwo64(~(V8_UINT64_C(1) << shift)));
+ }
+ TRACED_FORRANGE(uint32_t, shift, 2, 63) {
+ EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) - 1U));
+ }
+ EXPECT_FALSE(IsPowerOfTwo64(V8_UINT64_C(0xffffffffffffffff)));
+}
+
+
+TEST(Bits, RoundUpToPowerOfTwo32) {
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(1u << shift, RoundUpToPowerOfTwo32(1u << shift));
+ }
+ EXPECT_EQ(0u, RoundUpToPowerOfTwo32(0));
+ EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
+ EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
+}
+
+
+TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo32)) {
+ ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo32(0x80000001u); },
+ "0x80000000");
+}
+
+
+TEST(Bits, RoundDownToPowerOfTwo32) {
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(1u << shift, RoundDownToPowerOfTwo32(1u << shift));
+ }
+ EXPECT_EQ(0u, RoundDownToPowerOfTwo32(0));
+ EXPECT_EQ(4u, RoundDownToPowerOfTwo32(5));
+ EXPECT_EQ(0x80000000u, RoundDownToPowerOfTwo32(0x80000001u));
+}
+
+
+TEST(Bits, RotateRight32) {
+ TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+ EXPECT_EQ(0u, RotateRight32(0u, shift));
+ }
+ EXPECT_EQ(1u, RotateRight32(1, 0));
+ EXPECT_EQ(1u, RotateRight32(2, 1));
+ EXPECT_EQ(0x80000000u, RotateRight32(1, 1));
+}
+
+
+TEST(Bits, RotateRight64) {
+ TRACED_FORRANGE(uint64_t, shift, 0, 63) {
+ EXPECT_EQ(0u, RotateRight64(0u, shift));
+ }
+ EXPECT_EQ(1u, RotateRight64(1, 0));
+ EXPECT_EQ(1u, RotateRight64(2, 1));
+ EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
+}
+
+
+TEST(Bits, SignedAddOverflow32) {
+ int32_t val = 0;
+ EXPECT_FALSE(SignedAddOverflow32(0, 0, &val));
+ EXPECT_EQ(0, val);
+ EXPECT_TRUE(
+ SignedAddOverflow32(std::numeric_limits<int32_t>::max(), 1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+ EXPECT_TRUE(
+ SignedAddOverflow32(std::numeric_limits<int32_t>::min(), -1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+ EXPECT_TRUE(SignedAddOverflow32(std::numeric_limits<int32_t>::max(),
+ std::numeric_limits<int32_t>::max(), &val));
+ EXPECT_EQ(-2, val);
+ TRACED_FORRANGE(int32_t, i, 1, 50) {
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_FALSE(SignedAddOverflow32(i, j, &val));
+ EXPECT_EQ(i + j, val);
+ }
+ }
+}
+
+
+TEST(Bits, SignedSubOverflow32) {
+ int32_t val = 0;
+ EXPECT_FALSE(SignedSubOverflow32(0, 0, &val));
+ EXPECT_EQ(0, val);
+ EXPECT_TRUE(
+ SignedSubOverflow32(std::numeric_limits<int32_t>::min(), 1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+ EXPECT_TRUE(
+ SignedSubOverflow32(std::numeric_limits<int32_t>::max(), -1, &val));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+ TRACED_FORRANGE(int32_t, i, 1, 50) {
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_FALSE(SignedSubOverflow32(i, j, &val));
+ EXPECT_EQ(i - j, val);
+ }
+ }
+}
+
+} // namespace bits
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc
new file mode 100644
index 0000000000..6daee532a7
--- /dev/null
+++ b/deps/v8/src/base/bits.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
+ DCHECK_LE(value, 0x80000000u);
+ value = value - 1;
+ value = value | (value >> 1);
+ value = value | (value >> 2);
+ value = value | (value >> 4);
+ value = value | (value >> 8);
+ value = value | (value >> 16);
+ return value + 1;
+}
+
+} // namespace bits
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
new file mode 100644
index 0000000000..e6a733a45d
--- /dev/null
+++ b/deps/v8/src/base/bits.h
@@ -0,0 +1,150 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BITS_H_
+#define V8_BASE_BITS_H_
+
+#include "include/v8stdint.h"
+#include "src/base/macros.h"
+#if V8_CC_MSVC
+#include <intrin.h>
+#endif
+#if V8_OS_WIN32
+#include "src/base/win32-headers.h"
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+// CountPopulation32(value) returns the number of bits set in |value|.
+inline uint32_t CountPopulation32(uint32_t value) {
+#if V8_HAS_BUILTIN_POPCOUNT
+ return __builtin_popcount(value);
+#else
+ value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+ value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+ value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+ value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+ return value;
+#endif
+}
+
+
+// CountLeadingZeros32(value) returns the number of zero bits following the most
+// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
+inline uint32_t CountLeadingZeros32(uint32_t value) {
+#if V8_HAS_BUILTIN_CLZ
+ return value ? __builtin_clz(value) : 32;
+#elif V8_CC_MSVC
+ unsigned long result; // NOLINT(runtime/int)
+ if (!_BitScanReverse(&result, value)) return 32;
+ return static_cast<uint32_t>(31 - result);
+#else
+ value = value | (value >> 1);
+ value = value | (value >> 2);
+ value = value | (value >> 4);
+ value = value | (value >> 8);
+ value = value | (value >> 16);
+ return CountPopulation32(~value);
+#endif
+}
+
+
+// CountTrailingZeros32(value) returns the number of zero bits preceding the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns 32.
+inline uint32_t CountTrailingZeros32(uint32_t value) {
+#if V8_HAS_BUILTIN_CTZ
+ return value ? __builtin_ctz(value) : 32;
+#elif V8_CC_MSVC
+ unsigned long result; // NOLINT(runtime/int)
+ if (!_BitScanForward(&result, value)) return 32;
+ return static_cast<uint32_t>(result);
+#else
+ if (value == 0) return 32;
+ unsigned count = 0;
+ for (value ^= value - 1; value >>= 1; ++count)
+ ;
+ return count;
+#endif
+}
+
+
+// Returns true iff |value| is a power of 2.
+inline bool IsPowerOfTwo32(uint32_t value) {
+ return value && !(value & (value - 1));
+}
+
+
+// Returns true iff |value| is a power of 2.
+inline bool IsPowerOfTwo64(uint64_t value) {
+ return value && !(value & (value - 1));
+}
+
+
+// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
+// greater than or equal to |value|. If you pass in a |value| that is already a
+// power of two, it is returned as is. |value| must be less than or equal to
+// 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
+// Jr., figure 3-3, page 48, where the function is called clp2.
+uint32_t RoundUpToPowerOfTwo32(uint32_t value);
+
+
+// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
+// less than or equal to |value|. If you pass in a |value| that is already a
+// power of two, it is returned as is.
+inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
+ if (value > 0x80000000u) return 0x80000000u;
+ uint32_t result = RoundUpToPowerOfTwo32(value);
+ if (result > value) result >>= 1;
+ return result;
+}
+
+
+inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
+ if (shift == 0) return value;
+ return (value >> shift) | (value << (32 - shift));
+}
+
+
+inline uint64_t RotateRight64(uint64_t value, uint64_t shift) {
+ if (shift == 0) return value;
+ return (value >> shift) | (value << (64 - shift));
+}
+
+
+// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed summation resulted in an overflow.
+inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SADD_OVERFLOW
+ return __builtin_sadd_overflow(lhs, rhs, val);
+#else
+ uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
+ *val = bit_cast<int32_t>(res);
+ return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
+#endif
+}
+
+
+// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed subtraction resulted in an overflow.
+inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SSUB_OVERFLOW
+ return __builtin_ssub_overflow(lhs, rhs, val);
+#else
+ uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
+ *val = bit_cast<int32_t>(res);
+ return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
+#endif
+}
+
+} // namespace bits
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_BITS_H_
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index c66d6a5cc4..2bf57c9633 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -21,24 +21,20 @@
// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#else
#define V8_HOST_ARCH_X64 1
-#if defined(__x86_64__) && !defined(__LP64__)
+#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4 // Check for x32.
#define V8_HOST_ARCH_32_BIT 1
#else
#define V8_HOST_ARCH_64_BIT 1
#endif
-#define V8_HOST_CAN_READ_UNALIGNED 1
#endif // __native_client__
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__AARCH64EL__)
#define V8_HOST_ARCH_ARM64 1
#define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
@@ -90,7 +86,7 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_64_BIT
-#if defined(__x86_64__) && !defined(__LP64__)
+#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4 // Check for x32.
#define V8_TARGET_ARCH_32_BIT 1
#else
#define V8_TARGET_ARCH_64_BIT 1
@@ -158,10 +154,6 @@
#error Unknown target architecture endianness
#endif
-#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
-#define USING_BSD_ABI
-#endif
-
// Number of bits to represent the page size for paged spaces. The value of 20
// gives 1Mb bytes per page.
const int kPageSizeBits = 20;
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
new file mode 100644
index 0000000000..475a32c2c1
--- /dev/null
+++ b/deps/v8/src/base/compiler-specific.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_COMPILER_SPECIFIC_H_
+#define V8_BASE_COMPILER_SPECIFIC_H_
+
+#include "include/v8config.h"
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+// int x ALLOW_UNUSED = ...;
+#if V8_HAS_ATTRIBUTE_UNUSED
+#define ALLOW_UNUSED __attribute__((unused))
+#else
+#define ALLOW_UNUSED
+#endif
+
+
+// Annotate a virtual method indicating it must be overriding a virtual
+// method in the parent class.
+// Use like:
+// virtual void bar() OVERRIDE;
+#if V8_HAS_CXX11_OVERRIDE
+#define OVERRIDE override
+#else
+#define OVERRIDE /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a virtual method indicating that subclasses must not override it,
+// or annotate a class to indicate that it cannot be subclassed.
+// Use like:
+// class B FINAL : public A {};
+// virtual void bar() FINAL;
+#if V8_HAS_CXX11_FINAL
+#define FINAL final
+#elif V8_HAS___FINAL
+#define FINAL __final
+#elif V8_HAS_SEALED
+#define FINAL sealed
+#else
+#define FINAL /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+// int foo() WARN_UNUSED_RESULT;
+#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
+#endif
+
+#endif // V8_BASE_COMPILER_SPECIFIC_H_
diff --git a/deps/v8/test/base-unittests/cpu-unittest.cc b/deps/v8/src/base/cpu-unittest.cc
index 5c58f86238..5c58f86238 100644
--- a/deps/v8/test/base-unittests/cpu-unittest.cc
+++ b/deps/v8/src/base/cpu-unittest.cc
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index adce69d457..fbfbcf683b 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -115,8 +115,32 @@ static uint32_t ReadELFHWCaps() {
#endif // V8_HOST_ARCH_ARM
+#if V8_HOST_ARCH_MIPS
+int __detect_fp64_mode(void) {
+ double result = 0;
+ // Bit representation of (double)1 is 0x3FF0000000000000.
+ asm(
+ "lui $t0, 0x3FF0\n\t"
+ "ldc1 $f0, %0\n\t"
+ "mtc1 $t0, $f1\n\t"
+ "sdc1 $f0, %0\n\t"
+ : "+m" (result)
+ : : "t0", "$f0", "$f1", "memory");
+
+ return !(result == 1);
+}
+
+
+int __detect_mips_arch_revision(void) {
+ // TODO(dusmil): Do the specific syscall as soon as it is implemented in mips
+ // kernel. Currently fail-back to the least common denominator which is
+ // mips32 revision 1.
+ return 1;
+}
+#endif
+
// Extract the information exposed by the kernel via /proc/cpuinfo.
-class CPUInfo V8_FINAL {
+class CPUInfo FINAL {
public:
CPUInfo() : datalen_(0) {
// Get the size of the cpuinfo file by reading it until the end. This is
@@ -263,7 +287,8 @@ CPU::CPU() : stepping_(0),
has_thumb2_(false),
has_vfp_(false),
has_vfp3_(false),
- has_vfp3_d32_(false) {
+ has_vfp3_d32_(false),
+ is_fp64_mode_(false) {
memcpy(vendor_, "Unknown", 8);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
int cpu_info[4];
@@ -466,6 +491,10 @@ CPU::CPU() : stepping_(0),
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
+#ifdef V8_HOST_ARCH_MIPS
+ is_fp64_mode_ = __detect_fp64_mode();
+ architecture_ = __detect_mips_arch_revision();
+#endif
#elif V8_HOST_ARCH_ARM64
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index eb51674df5..dc0eaf485f 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -28,7 +28,7 @@ namespace base {
// architectures. For each architecture the file cpu_<arch>.cc contains the
// implementation of these static functions.
-class CPU V8_FINAL {
+class CPU FINAL {
public:
CPU();
@@ -77,6 +77,9 @@ class CPU V8_FINAL {
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
+ // mips features
+ bool is_fp64_mode() const { return is_fp64_mode_; }
+
private:
char vendor_[13];
int stepping_;
@@ -104,6 +107,7 @@ class CPU V8_FINAL {
bool has_vfp_;
bool has_vfp3_;
bool has_vfp3_d32_;
+ bool is_fp64_mode_;
};
} } // namespace v8::base
diff --git a/deps/v8/src/base/division-by-constant-unittest.cc b/deps/v8/src/base/division-by-constant-unittest.cc
new file mode 100644
index 0000000000..47c24834db
--- /dev/null
+++ b/deps/v8/src/base/division-by-constant-unittest.cc
@@ -0,0 +1,132 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check all examples from table 10-1 of "Hacker's Delight".
+
+#include "src/base/division-by-constant.h"
+
+#include <ostream> // NOLINT
+
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+std::ostream& operator<<(std::ostream& os,
+ const MagicNumbersForDivision<T>& mag) {
+ return os << "{ multiplier: " << mag.multiplier << ", shift: " << mag.shift
+ << ", add: " << mag.add << " }";
+}
+
+
+// Some abbreviations...
+
+typedef MagicNumbersForDivision<uint32_t> M32;
+typedef MagicNumbersForDivision<uint64_t> M64;
+
+
+static M32 s32(int32_t d) {
+ return SignedDivisionByConstant<uint32_t>(static_cast<uint32_t>(d));
+}
+
+
+static M64 s64(int64_t d) {
+ return SignedDivisionByConstant<uint64_t>(static_cast<uint64_t>(d));
+}
+
+
+static M32 u32(uint32_t d) { return UnsignedDivisionByConstant<uint32_t>(d); }
+static M64 u64(uint64_t d) { return UnsignedDivisionByConstant<uint64_t>(d); }
+
+
+TEST(DivisionByConstant, Signed32) {
+ EXPECT_EQ(M32(0x99999999U, 1, false), s32(-5));
+ EXPECT_EQ(M32(0x55555555U, 1, false), s32(-3));
+ int32_t d = -1;
+ for (unsigned k = 1; k <= 32 - 1; ++k) {
+ d *= 2;
+ EXPECT_EQ(M32(0x7FFFFFFFU, k - 1, false), s32(d));
+ }
+ for (unsigned k = 1; k <= 32 - 2; ++k) {
+ EXPECT_EQ(M32(0x80000001U, k - 1, false), s32(1 << k));
+ }
+ EXPECT_EQ(M32(0x55555556U, 0, false), s32(3));
+ EXPECT_EQ(M32(0x66666667U, 1, false), s32(5));
+ EXPECT_EQ(M32(0x2AAAAAABU, 0, false), s32(6));
+ EXPECT_EQ(M32(0x92492493U, 2, false), s32(7));
+ EXPECT_EQ(M32(0x38E38E39U, 1, false), s32(9));
+ EXPECT_EQ(M32(0x66666667U, 2, false), s32(10));
+ EXPECT_EQ(M32(0x2E8BA2E9U, 1, false), s32(11));
+ EXPECT_EQ(M32(0x2AAAAAABU, 1, false), s32(12));
+ EXPECT_EQ(M32(0x51EB851FU, 3, false), s32(25));
+ EXPECT_EQ(M32(0x10624DD3U, 3, false), s32(125));
+ EXPECT_EQ(M32(0x68DB8BADU, 8, false), s32(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned32) {
+ EXPECT_EQ(M32(0x00000000U, 0, true), u32(1));
+ for (unsigned k = 1; k <= 30; ++k) {
+ EXPECT_EQ(M32(1U << (32 - k), 0, false), u32(1U << k));
+ }
+ EXPECT_EQ(M32(0xAAAAAAABU, 1, false), u32(3));
+ EXPECT_EQ(M32(0xCCCCCCCDU, 2, false), u32(5));
+ EXPECT_EQ(M32(0xAAAAAAABU, 2, false), u32(6));
+ EXPECT_EQ(M32(0x24924925U, 3, true), u32(7));
+ EXPECT_EQ(M32(0x38E38E39U, 1, false), u32(9));
+ EXPECT_EQ(M32(0xCCCCCCCDU, 3, false), u32(10));
+ EXPECT_EQ(M32(0xBA2E8BA3U, 3, false), u32(11));
+ EXPECT_EQ(M32(0xAAAAAAABU, 3, false), u32(12));
+ EXPECT_EQ(M32(0x51EB851FU, 3, false), u32(25));
+ EXPECT_EQ(M32(0x10624DD3U, 3, false), u32(125));
+ EXPECT_EQ(M32(0xD1B71759U, 9, false), u32(625));
+}
+
+
+TEST(DivisionByConstant, Signed64) {
+ EXPECT_EQ(M64(0x9999999999999999ULL, 1, false), s64(-5));
+ EXPECT_EQ(M64(0x5555555555555555ULL, 1, false), s64(-3));
+ int64_t d = -1;
+ for (unsigned k = 1; k <= 64 - 1; ++k) {
+ d *= 2;
+ EXPECT_EQ(M64(0x7FFFFFFFFFFFFFFFULL, k - 1, false), s64(d));
+ }
+ for (unsigned k = 1; k <= 64 - 2; ++k) {
+ EXPECT_EQ(M64(0x8000000000000001ULL, k - 1, false), s64(1LL << k));
+ }
+ EXPECT_EQ(M64(0x5555555555555556ULL, 0, false), s64(3));
+ EXPECT_EQ(M64(0x6666666666666667ULL, 1, false), s64(5));
+ EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 0, false), s64(6));
+ EXPECT_EQ(M64(0x4924924924924925ULL, 1, false), s64(7));
+ EXPECT_EQ(M64(0x1C71C71C71C71C72ULL, 0, false), s64(9));
+ EXPECT_EQ(M64(0x6666666666666667ULL, 2, false), s64(10));
+ EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), s64(11));
+ EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 1, false), s64(12));
+ EXPECT_EQ(M64(0xA3D70A3D70A3D70BULL, 4, false), s64(25));
+ EXPECT_EQ(M64(0x20C49BA5E353F7CFULL, 4, false), s64(125));
+ EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), s64(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned64) {
+ EXPECT_EQ(M64(0x0000000000000000ULL, 0, true), u64(1));
+ for (unsigned k = 1; k <= 64 - 2; ++k) {
+ EXPECT_EQ(M64(1ULL << (64 - k), 0, false), u64(1ULL << k));
+ }
+ EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 1, false), u64(3));
+ EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 2, false), u64(5));
+ EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 2, false), u64(6));
+ EXPECT_EQ(M64(0x2492492492492493ULL, 3, true), u64(7));
+ EXPECT_EQ(M64(0xE38E38E38E38E38FULL, 3, false), u64(9));
+ EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 3, false), u64(10));
+ EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), u64(11));
+ EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 3, false), u64(12));
+ EXPECT_EQ(M64(0x47AE147AE147AE15ULL, 5, true), u64(25));
+ EXPECT_EQ(M64(0x0624DD2F1A9FBE77ULL, 7, true), u64(125));
+ EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), u64(625));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc
new file mode 100644
index 0000000000..235d39fe57
--- /dev/null
+++ b/deps/v8/src/base/division-by-constant.cc
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/division-by-constant.h"
+
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+bool MagicNumbersForDivision<T>::operator==(
+ const MagicNumbersForDivision& rhs) const {
+ return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
+}
+
+
+template <class T>
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
+ STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
+ DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
+ const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
+ const T min = (static_cast<T>(1) << (bits - 1));
+ const bool neg = (min & d) != 0;
+ const T ad = neg ? (0 - d) : d;
+ const T t = min + (d >> (bits - 1));
+ const T anc = t - 1 - t % ad; // Absolute value of nc
+ unsigned p = bits - 1; // Init. p.
+ T q1 = min / anc; // Init. q1 = 2**p/|nc|.
+ T r1 = min - q1 * anc; // Init. r1 = rem(2**p, |nc|).
+ T q2 = min / ad; // Init. q2 = 2**p/|d|.
+ T r2 = min - q2 * ad; // Init. r2 = rem(2**p, |d|).
+ T delta;
+ do {
+ p = p + 1;
+ q1 = 2 * q1; // Update q1 = 2**p/|nc|.
+ r1 = 2 * r1; // Update r1 = rem(2**p, |nc|).
+ if (r1 >= anc) { // Must be an unsigned comparison here.
+ q1 = q1 + 1;
+ r1 = r1 - anc;
+ }
+ q2 = 2 * q2; // Update q2 = 2**p/|d|.
+ r2 = 2 * r2; // Update r2 = rem(2**p, |d|).
+ if (r2 >= ad) { // Must be an unsigned comparison here.
+ q2 = q2 + 1;
+ r2 = r2 - ad;
+ }
+ delta = ad - r2;
+ } while (q1 < delta || (q1 == delta && r1 == 0));
+ T mul = q2 + 1;
+ return {neg ? (0 - mul) : mul, p - bits, false};
+}
+
+
+template <class T>
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
+ unsigned leading_zeros) {
+ STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
+ DCHECK(d != 0);
+ const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
+ const T ones = ~static_cast<T>(0) >> leading_zeros;
+ const T min = static_cast<T>(1) << (bits - 1);
+ const T max = ~static_cast<T>(0) >> 1;
+ const T nc = ones - (ones - d) % d;
+ bool a = false; // Init. "add" indicator.
+ unsigned p = bits - 1; // Init. p.
+ T q1 = min / nc; // Init. q1 = 2**p/nc
+ T r1 = min - q1 * nc; // Init. r1 = rem(2**p,nc)
+ T q2 = max / d; // Init. q2 = (2**p - 1)/d.
+ T r2 = max - q2 * d; // Init. r2 = rem(2**p - 1, d).
+ T delta;
+ do {
+ p = p + 1;
+ if (r1 >= nc - r1) {
+ q1 = 2 * q1 + 1;
+ r1 = 2 * r1 - nc;
+ } else {
+ q1 = 2 * q1;
+ r1 = 2 * r1;
+ }
+ if (r2 + 1 >= d - r2) {
+ if (q2 >= max) a = true;
+ q2 = 2 * q2 + 1;
+ r2 = 2 * r2 + 1 - d;
+ } else {
+ if (q2 >= min) a = true;
+ q2 = 2 * q2;
+ r2 = 2 * r2 + 1;
+ }
+ delta = d - 1 - r2;
+ } while (p < bits * 2 && (q1 < delta || (q1 == delta && r1 == 0)));
+ return {q2 + 1, p - bits, a};
+}
+
+
+// -----------------------------------------------------------------------------
+// Instantiations.
+
+template struct MagicNumbersForDivision<uint32_t>;
+template struct MagicNumbersForDivision<uint64_t>;
+
+template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
+template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
+
+template MagicNumbersForDivision<uint32_t> UnsignedDivisionByConstant(
+ uint32_t d, unsigned leading_zeros);
+template MagicNumbersForDivision<uint64_t> UnsignedDivisionByConstant(
+ uint64_t d, unsigned leading_zeros);
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/division-by-constant.h b/deps/v8/src/base/division-by-constant.h
new file mode 100644
index 0000000000..02e7e14b01
--- /dev/null
+++ b/deps/v8/src/base/division-by-constant.h
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_DIVISION_BY_CONSTANT_H_
+#define V8_BASE_DIVISION_BY_CONSTANT_H_
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+
+// The magic numbers for division via multiplication, see Warren's "Hacker's
+// Delight", chapter 10. The template parameter must be one of the unsigned
+// integral types.
+template <class T>
+struct MagicNumbersForDivision {
+ MagicNumbersForDivision(T m, unsigned s, bool a)
+ : multiplier(m), shift(s), add(a) {}
+ bool operator==(const MagicNumbersForDivision& rhs) const;
+
+ T multiplier;
+ unsigned shift;
+ bool add;
+};
+
+
+// Calculate the multiplier and shift for signed division via multiplication.
+// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
+template <class T>
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
+
+
+// Calculate the multiplier and shift for unsigned division via multiplication,
+// see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
+// leading_zeros can be used to speed up the calculation if the given number of
+// upper bits of the dividend value are known to be zero.
+template <class T>
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+ T d, unsigned leading_zeros = 0);
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_DIVISION_BY_CONSTANT_H_
diff --git a/deps/v8/src/base/flags-unittest.cc b/deps/v8/src/base/flags-unittest.cc
new file mode 100644
index 0000000000..a1d6f3703a
--- /dev/null
+++ b/deps/v8/src/base/flags-unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8stdint.h"
+#include "src/base/flags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+enum Flag1 {
+ kFlag1None = 0,
+ kFlag1First = 1u << 1,
+ kFlag1Second = 1u << 2,
+ kFlag1All = kFlag1None | kFlag1First | kFlag1Second
+};
+typedef Flags<Flag1> Flags1;
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Flags1)
+
+
+Flags1 bar(Flags1 flags1) { return flags1; }
+
+} // namespace
+
+
+TEST(FlagsTest, BasicOperations) {
+ Flags1 a;
+ EXPECT_EQ(kFlag1None, static_cast<int>(a));
+ a |= kFlag1First;
+ EXPECT_EQ(kFlag1First, static_cast<int>(a));
+ a = a | kFlag1Second;
+ EXPECT_EQ(kFlag1All, static_cast<int>(a));
+ a &= kFlag1Second;
+ EXPECT_EQ(kFlag1Second, static_cast<int>(a));
+ a = kFlag1None & a;
+ EXPECT_EQ(kFlag1None, static_cast<int>(a));
+ a ^= (kFlag1All | kFlag1None);
+ EXPECT_EQ(kFlag1All, static_cast<int>(a));
+ Flags1 b = ~a;
+ EXPECT_EQ(kFlag1All, static_cast<int>(a));
+ EXPECT_EQ(~static_cast<int>(a), static_cast<int>(b));
+ Flags1 c = a;
+ EXPECT_EQ(a, c);
+ EXPECT_NE(a, b);
+ EXPECT_EQ(a, bar(a));
+ EXPECT_EQ(a, bar(kFlag1All));
+}
+
+
+namespace {
+namespace foo {
+
+enum Option {
+ kNoOptions = 0,
+ kOption1 = 1,
+ kOption2 = 2,
+ kAllOptions = kNoOptions | kOption1 | kOption2
+};
+typedef Flags<Option> Options;
+
+} // namespace foo
+
+
+DEFINE_OPERATORS_FOR_FLAGS(foo::Options)
+
+} // namespace
+
+
+TEST(FlagsTest, NamespaceScope) {
+ foo::Options options;
+ options ^= foo::kNoOptions;
+ options |= foo::kOption1 | foo::kOption2;
+ EXPECT_EQ(foo::kAllOptions, static_cast<int>(options));
+}
+
+
+namespace {
+
+struct Foo {
+ enum Enum { kEnum1 = 1, kEnum2 = 2 };
+ typedef Flags<Enum, uint32_t> Enums;
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Foo::Enums)
+
+} // namespace
+
+
+TEST(FlagsTest, ClassScope) {
+ Foo::Enums enums;
+ enums |= Foo::kEnum1;
+ enums |= Foo::kEnum2;
+ EXPECT_TRUE(enums & Foo::kEnum1);
+ EXPECT_TRUE(enums & Foo::kEnum2);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
new file mode 100644
index 0000000000..f3420ee536
--- /dev/null
+++ b/deps/v8/src/base/flags.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_FLAGS_H_
+#define V8_BASE_FLAGS_H_
+
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+// The Flags class provides a type-safe way of storing OR-combinations of enum
+// values. The Flags<T, S> class is a template class, where T is an enum type,
+// and S is the underlying storage type (usually int).
+//
+// The traditional C++ approach for storing OR-combinations of enum values is to
+// use an int or unsigned int variable. The inconvenience with this approach is
+// that there's no type checking at all; any enum value can be OR'd with any
+// other enum value and passed on to a function that takes an int or unsigned
+// int.
+template <typename T, typename S = int>
+class Flags FINAL {
+ public:
+ typedef T flag_type;
+ typedef S mask_type;
+
+ Flags() : mask_(0) {}
+ Flags(flag_type flag) : mask_(flag) {} // NOLINT(runtime/explicit)
+ explicit Flags(mask_type mask) : mask_(mask) {}
+
+ Flags& operator&=(const Flags& flags) {
+ mask_ &= flags.mask_;
+ return *this;
+ }
+ Flags& operator|=(const Flags& flags) {
+ mask_ |= flags.mask_;
+ return *this;
+ }
+ Flags& operator^=(const Flags& flags) {
+ mask_ ^= flags.mask_;
+ return *this;
+ }
+
+ Flags operator&(const Flags& flags) const { return Flags(*this) &= flags; }
+ Flags operator|(const Flags& flags) const { return Flags(*this) |= flags; }
+ Flags operator^(const Flags& flags) const { return Flags(*this) ^= flags; }
+
+ Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
+ Flags& operator|=(flag_type flag) { return operator|=(Flags(flag)); }
+ Flags& operator^=(flag_type flag) { return operator^=(Flags(flag)); }
+
+ Flags operator&(flag_type flag) const { return operator&(Flags(flag)); }
+ Flags operator|(flag_type flag) const { return operator|(Flags(flag)); }
+ Flags operator^(flag_type flag) const { return operator^(Flags(flag)); }
+
+ Flags operator~() const { return Flags(~mask_); }
+
+ operator mask_type() const { return mask_; }
+ bool operator!() const { return !mask_; }
+
+ private:
+ mask_type mask_;
+};
+
+
+#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
+ inline Type operator&(Type::flag_type lhs, \
+ Type::flag_type rhs)ALLOW_UNUSED WARN_UNUSED_RESULT; \
+ inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) & rhs; \
+ } \
+ inline Type operator&(Type::flag_type lhs, \
+ const Type& rhs)ALLOW_UNUSED WARN_UNUSED_RESULT; \
+ inline Type operator&(Type::flag_type lhs, const Type& rhs) { \
+ return rhs & lhs; \
+ } \
+ inline void operator&(Type::flag_type lhs, Type::mask_type rhs)ALLOW_UNUSED; \
+ inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \
+ inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) \
+ ALLOW_UNUSED WARN_UNUSED_RESULT; \
+ inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) | rhs; \
+ } \
+ inline Type operator|(Type::flag_type lhs, const Type& rhs) \
+ ALLOW_UNUSED WARN_UNUSED_RESULT; \
+ inline Type operator|(Type::flag_type lhs, const Type& rhs) { \
+ return rhs | lhs; \
+ } \
+ inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \
+ ALLOW_UNUSED; \
+ inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \
+ inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) \
+ ALLOW_UNUSED WARN_UNUSED_RESULT; \
+ inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) ^ rhs; \
+ } inline Type operator^(Type::flag_type lhs, const Type& rhs) \
+ ALLOW_UNUSED WARN_UNUSED_RESULT; \
+ inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
+ return rhs ^ lhs; \
+ } inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
+ ALLOW_UNUSED; \
+ inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_FLAGS_H_
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 4f62ac48dd..c3f609f980 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -22,7 +22,7 @@ namespace base {
void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
+ int size = backtrace(trace, arraysize(trace));
char** symbols = backtrace_symbols(trace, size);
OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
@@ -54,7 +54,7 @@ void DumpBacktrace() {
bt_sprn_memmap(&memmap, out, sizeof(out));
OS::PrintError(out);
bt_addr_t trace[100];
- int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
+ int size = bt_get_backtrace(&acc, trace, arraysize(trace));
OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
OS::PrintError("(empty)\n");
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 50828db57f..cef088cb81 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -5,8 +5,11 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_
+#include <cstring>
+
#include "include/v8stdint.h"
#include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
@@ -20,13 +23,220 @@
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
-// The expression ARRAY_SIZE(a) is a compile-time constant of type
-// size_t which represents the number of elements of the given
-// array. You should only use ARRAY_SIZE on statically allocated
-// arrays.
-#define ARRAY_SIZE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions. It's less safe than arraysize as it accepts some
+// (although not all) pointers. Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
+//
+// "warning: division by zero in ..."
+//
+// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element). If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array. Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size. Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+#define ARRAYSIZE_UNSAFE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) // NOLINT
+
+
+#if V8_OS_NACL
+
+// TODO(bmeurer): For some reason, the NaCl toolchain cannot handle the correct
+// definition of arraysize() below, so we have to use the unsafe version for
+// now.
+#define arraysize ARRAYSIZE_UNSAFE
+
+#else // V8_OS_NACL
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function. In these rare
+// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
+// due to a limitation in C++'s template system. The limitation might
+// eventually be removed, but it hasn't happened yet.
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+
+#if !V8_CC_MSVC
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#endif // V8_OS_NACL
+
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+// content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+#if V8_HAS_CXX11_STATIC_ASSERT
+
+// Under C++11, just use static_assert.
+#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+
+#else
+
+template <bool>
+struct CompileAssert {};
+
+#define COMPILE_ASSERT(expr, msg) \
+ typedef CompileAssert<static_cast<bool>(expr)> \
+ msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+// elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+// does not work, as gcc supports variable-length arrays whose sizes
+// are determined at run-time (this is gcc's extension and not part
+// of the C++ standard). As a result, gcc fails to reject the
+// following code with the simple definition:
+//
+// int foo;
+// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+// // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensures that
+// expr is a compile-time constant. (Template arguments must be
+// determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
+//
+// CompileAssert<bool(expr)>
+//
+// instead, these compilers will refuse to compile
+//
+// COMPILE_ASSERT(5 > 0, some_message);
+//
+// (They seem to think the ">" in "5 > 0" marks the end of the
+// template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+// ((expr) ? 1 : -1).
+//
+// This is to avoid running into a bug in MS VC 7.1, which
+// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+#endif
+
+
+// bit_cast<Dest,Source> is a template function that implements the
+// equivalent of "*reinterpret_cast<Dest*>(&source)". We need this in
+// very low-level functions like the protobuf library and fast math
+// support.
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int32>(f);
+// // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+// // WRONG
+// float f = 3.14159265358979; // WRONG
+// int i = * reinterpret_cast<int*>(&f); // WRONG
+//
+// The address-casting method actually produces undefined behavior
+// according to ISO C++ specification section 3.10 -15 -. Roughly, this
+// section says: if an object in memory has one type, and a program
+// accesses it with a different type, then the result is undefined
+// behavior for most values of "different type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f). And it is particularly true for
+// conversions between integral lvalues and floating-point lvalues.
+//
+// The purpose of 3.10 -15- is to allow optimizing compilers to assume
+// that expressions with different types refer to different memory. gcc
+// 4.0.1 has an optimizer that takes advantage of this. So a
+// non-conforming program quietly produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast. The problem is type
+// punning: holding an object in memory of one type and reading its bits
+// back using a different type.
+//
+// The C++ standard is more subtle and complex than this, but that
+// is the basic idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9 . Also, of course,
+// bit_cast<> wraps up the nasty logic in one place.
+//
+// Fortunately memcpy() is very fast. In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement. On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+//
+// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
+//
+// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
+// is likely to surprise you.
+template <class Dest, class Source>
+V8_INLINE Dest bit_cast(Source const& source) {
+ COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
+
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
// A macro to disallow the evil copy constructor and operator= functions
@@ -52,8 +262,8 @@
#define NO_INLINE(declarator) V8_NOINLINE declarator
-// Newly written code should use V8_WARN_UNUSED_RESULT.
-#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
+// Newly written code should use WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT WARN_UNUSED_RESULT
// Define V8_USE_ADDRESS_SANITIZER macros.
@@ -101,7 +311,7 @@ template <int> class StaticAssertionHelper { };
#define STATIC_ASSERT(test) \
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
+ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED
#endif
@@ -115,14 +325,6 @@ inline void USE(T) { }
#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
-// Returns true iff x is a power of 2. Cannot be used with the maximally
-// negative value of the type T (the -1 overflows).
-template <typename T>
-inline bool IsPowerOf2(T x) {
- return IS_POWER_OF_TWO(x);
-}
-
-
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
@@ -195,7 +397,7 @@ inline T AddressFrom(intptr_t x) {
// Return the largest multiple of m which is <= x.
template <typename T>
inline T RoundDown(T x, intptr_t m) {
- DCHECK(IsPowerOf2(m));
+ DCHECK(IS_POWER_OF_TWO(m));
return AddressFrom<T>(OffsetFrom(x) & -m);
}
@@ -206,54 +408,4 @@ inline T RoundUp(T x, intptr_t m) {
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
-
-// Increment a pointer until it has the specified alignment.
-// This works like RoundUp, but it works correctly on pointer types where
-// sizeof(*pointer) might not be 1.
-template<class T>
-T AlignUp(T pointer, size_t alignment) {
- DCHECK(sizeof(pointer) == sizeof(uintptr_t));
- uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
- return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
-}
-
-
-template <typename T, typename U>
-inline bool IsAligned(T value, U alignment) {
- return (value & (alignment - 1)) == 0;
-}
-
-
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
- DCHECK(x <= 0x80000000u);
- x = x - 1;
- x = x | (x >> 1);
- x = x | (x >> 2);
- x = x | (x >> 4);
- x = x | (x >> 8);
- x = x | (x >> 16);
- return x + 1;
-}
-
-
-inline uint32_t RoundDownToPowerOf2(uint32_t x) {
- uint32_t rounded_up = RoundUpToPowerOf2(x);
- if (rounded_up > x) return rounded_up >> 1;
- return rounded_up;
-}
-
-
-// Returns current value of top of the stack. Works correctly with ASAN.
-DISABLE_ASAN
-inline uintptr_t GetCurrentStackPosition() {
- // Takes the address of the limit variable in order to find out where
- // the top of stack is right now.
- uintptr_t limit = reinterpret_cast<uintptr_t>(&limit);
- return limit;
-}
-
#endif // V8_BASE_MACROS_H_
diff --git a/deps/v8/test/base-unittests/platform/condition-variable-unittest.cc b/deps/v8/src/base/platform/condition-variable-unittest.cc
index ea1efd0d5b..fe0ad2ade8 100644
--- a/deps/v8/test/base-unittests/platform/condition-variable-unittest.cc
+++ b/deps/v8/src/base/platform/condition-variable-unittest.cc
@@ -29,7 +29,7 @@ TEST(ConditionVariable, WaitForAfterNofityOnSameThread) {
namespace {
-class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
+class ThreadWithMutexAndConditionVariable FINAL : public Thread {
public:
ThreadWithMutexAndConditionVariable()
: Thread(Options("ThreadWithMutexAndConditionVariable")),
@@ -37,7 +37,7 @@ class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
finished_(false) {}
virtual ~ThreadWithMutexAndConditionVariable() {}
- virtual void Run() V8_OVERRIDE {
+ virtual void Run() OVERRIDE {
LockGuard<Mutex> lock_guard(&mutex_);
running_ = true;
cv_.NotifyOne();
@@ -54,7 +54,7 @@ class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
Mutex mutex_;
};
-}
+} // namespace
TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
@@ -108,7 +108,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
namespace {
-class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
+class ThreadWithSharedMutexAndConditionVariable FINAL : public Thread {
public:
ThreadWithSharedMutexAndConditionVariable()
: Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
@@ -118,7 +118,7 @@ class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
mutex_(NULL) {}
virtual ~ThreadWithSharedMutexAndConditionVariable() {}
- virtual void Run() V8_OVERRIDE {
+ virtual void Run() OVERRIDE {
LockGuard<Mutex> lock_guard(mutex_);
running_ = true;
cv_->NotifyAll();
@@ -135,7 +135,7 @@ class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
Mutex* mutex_;
};
-}
+} // namespace
TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
@@ -218,7 +218,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
namespace {
-class LoopIncrementThread V8_FINAL : public Thread {
+class LoopIncrementThread FINAL : public Thread {
public:
LoopIncrementThread(int rem, int* counter, int limit, int thread_count,
ConditionVariable* cv, Mutex* mutex)
@@ -233,7 +233,7 @@ class LoopIncrementThread V8_FINAL : public Thread {
EXPECT_EQ(0, limit % thread_count);
}
- virtual void Run() V8_OVERRIDE {
+ virtual void Run() OVERRIDE {
int last_count = -1;
while (true) {
LockGuard<Mutex> lock_guard(mutex_);
@@ -263,7 +263,7 @@ class LoopIncrementThread V8_FINAL : public Thread {
Mutex* mutex_;
};
-}
+} // namespace
TEST(ConditionVariable, LoopIncrement) {
@@ -275,10 +275,10 @@ TEST(ConditionVariable, LoopIncrement) {
int counter = 0;
// Setup the threads.
- Thread** threads = new Thread*[thread_count];
+ Thread** threads = new Thread* [thread_count];
for (int n = 0; n < thread_count; ++n) {
- threads[n] = new LoopIncrementThread(
- n, &counter, limit, thread_count, &cv, &mutex);
+ threads[n] = new LoopIncrementThread(n, &counter, limit, thread_count,
+ &cv, &mutex);
}
// Start all threads.
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 9855970eba..b5a6c3f5d7 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -28,7 +28,7 @@ class TimeDelta;
// the mutex and suspend the execution of the calling thread. When the condition
// variable is notified, the thread is awakened, and the mutex is reacquired.
-class ConditionVariable V8_FINAL {
+class ConditionVariable FINAL {
public:
ConditionVariable();
~ConditionVariable();
@@ -56,19 +56,19 @@ class ConditionVariable V8_FINAL {
// spuriously. When unblocked, regardless of the reason, the lock on the mutex
// is reacquired and |WaitFor()| exits. Returns true if the condition variable
// was notified prior to the timeout.
- bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+ bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
typedef pthread_cond_t NativeHandle;
#elif V8_OS_WIN
struct Event;
- class NativeHandle V8_FINAL {
+ class NativeHandle FINAL {
public:
NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
~NativeHandle();
- Event* Pre() V8_WARN_UNUSED_RESULT;
+ Event* Pre() WARN_UNUSED_RESULT;
void Post(Event* event, bool result);
Mutex* mutex() { return &mutex_; }
diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h
index 3f456efdf3..dccba3a3ac 100644
--- a/deps/v8/src/base/platform/elapsed-timer.h
+++ b/deps/v8/src/base/platform/elapsed-timer.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace base {
-class ElapsedTimer V8_FINAL {
+class ElapsedTimer FINAL {
public:
#ifdef DEBUG
ElapsedTimer() : started_(false) {}
diff --git a/deps/v8/test/base-unittests/platform/mutex-unittest.cc b/deps/v8/src/base/platform/mutex-unittest.cc
index 5af5efb5a9..5af5efb5a9 100644
--- a/deps/v8/test/base-unittests/platform/mutex-unittest.cc
+++ b/deps/v8/src/base/platform/mutex-unittest.cc
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 2f8c07d89e..5d0e57be57 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -33,7 +33,7 @@ namespace base {
// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
// while still owned by some thread. The Mutex class is non-copyable.
-class Mutex V8_FINAL {
+class Mutex FINAL {
public:
Mutex();
~Mutex();
@@ -50,7 +50,7 @@ class Mutex V8_FINAL {
// Tries to lock the given mutex. Returns whether the mutex was
// successfully locked.
- bool TryLock() V8_WARN_UNUSED_RESULT;
+ bool TryLock() WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
@@ -127,7 +127,7 @@ typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
// The behavior of a program is undefined if a recursive mutex is destroyed
// while still owned by some thread. The RecursiveMutex class is non-copyable.
-class RecursiveMutex V8_FINAL {
+class RecursiveMutex FINAL {
public:
RecursiveMutex();
~RecursiveMutex();
@@ -149,7 +149,7 @@ class RecursiveMutex V8_FINAL {
// Tries to lock the given mutex. Returns whether the mutex was
// successfully locked.
- bool TryLock() V8_WARN_UNUSED_RESULT;
+ bool TryLock() WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
typedef Mutex::NativeHandle NativeHandle;
@@ -199,7 +199,7 @@ typedef LazyStaticInstance<RecursiveMutex,
// The LockGuard class is non-copyable.
template <typename Mutex>
-class LockGuard V8_FINAL {
+class LockGuard FINAL {
public:
explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
~LockGuard() { mutex_->Unlock(); }
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index d93439bf14..8a767cf296 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -205,7 +205,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size);
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 09d7ca77d3..507b946f69 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -131,14 +131,14 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
addr_buffer[0] = '0';
addr_buffer[1] = 'x';
addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
+ ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
+ if (bytes_read < 8) break;
unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
+ bytes_read = read(fd, addr_buffer + 2, 1);
+ if (bytes_read < 1) break;
if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
+ bytes_read = read(fd, addr_buffer + 2, 8);
+ if (bytes_read < 8) break;
unsigned end = StringToLong(addr_buffer);
char buffer[MAP_LENGTH];
int bytes_read = -1;
@@ -146,8 +146,8 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
bytes_read++;
if (bytes_read >= MAP_LENGTH - 1)
break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
+ bytes_read = read(fd, buffer + bytes_read, 1);
+ if (bytes_read < 1) break;
} while (buffer[bytes_read] != '\n');
buffer[bytes_read] = 0;
// Ignore mappings that are not executable.
@@ -182,7 +182,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index fca170916c..eff5ced3b7 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -9,9 +9,7 @@
#include <semaphore.h>
#include <signal.h>
#include <stdlib.h>
-#include <sys/prctl.h>
#include <sys/resource.h>
-#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -46,6 +44,15 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#if V8_OS_NACL
+#if !defined(MAP_NORESERVE)
+// PNaCL doesn't have this, so we always grab all of the memory, which is bad.
+#define MAP_NORESERVE 0
+#endif
+#else
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#endif
namespace v8 {
namespace base {
@@ -95,20 +102,30 @@ bool OS::ArmUsingHardFloat() {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+#if V8_OS_NACL
+ // Missing support for tm_zone field.
+ return "";
+#else
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
+#endif
}
double OS::LocalTimeOffset(TimezoneCache* cache) {
+#if V8_OS_NACL
+ // Missing support for tm_zone field.
+ return 0;
+#else
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+#endif
}
@@ -260,18 +277,15 @@ void OS::SignalCodeMovingGC() {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
- void* addr = mmap(OS::GetRandomMmapAddr(),
- size,
-#if defined(__native_client__)
+ void* addr = mmap(OS::GetRandomMmapAddr(), size,
+#if V8_OS_NACL
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
PROT_READ,
#else
PROT_READ | PROT_EXEC,
#endif
- MAP_PRIVATE,
- fileno(f),
- 0);
+ MAP_PRIVATE, fileno(f), 0);
DCHECK(addr != MAP_FAILED);
OS::Free(addr, size);
fclose(f);
@@ -292,7 +306,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
@@ -387,7 +401,7 @@ void* VirtualMemory::ReserveRegion(size_t size) {
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-#if defined(__native_client__)
+#if V8_OS_NACL
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
int prot = PROT_READ | PROT_WRITE;
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 77771f46c1..77893ee1b9 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -184,7 +184,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index a3f39e2dd7..4e706cb7c1 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -213,7 +213,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index bb004d207e..0fc04fc110 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -6,7 +6,6 @@
// own, but contains the parts which are the same across the POSIX platforms
// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
-#include <dlfcn.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
@@ -20,21 +19,13 @@
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
-#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
-#if defined(__linux__)
-#include <sys/prctl.h> // NOLINT, for prctl
-#endif
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/sysctl.h> // NOLINT, for sysctl
#endif
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <netinet/in.h>
-
#undef MAP_TYPE
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
@@ -55,6 +46,18 @@
#include "src/base/atomicops.h"
#endif
+#if V8_OS_MACOSX
+#include <dlfcn.h>
+#endif
+
+#if V8_OS_LINUX
+#include <sys/prctl.h> // NOLINT, for prctl
+#endif
+
+#if !V8_OS_NACL
+#include <sys/syscall.h>
+#endif
+
namespace v8 {
namespace base {
@@ -70,77 +73,6 @@ const char* g_gc_fake_mmap = NULL;
} // namespace
-int OS::NumberOfProcessorsOnline() {
- return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
-}
-
-
-// Maximum size of the virtual memory. 0 means there is no artificial
-// limit.
-
-intptr_t OS::MaxVirtualMemory() {
- struct rlimit limit;
- int result = getrlimit(RLIMIT_DATA, &limit);
- if (result != 0) return 0;
-#if V8_OS_NACL
- // The NaCl compiler doesn't like resource.h constants.
- if (static_cast<int>(limit.rlim_cur) == -1) return 0;
-#else
- if (limit.rlim_cur == RLIM_INFINITY) return 0;
-#endif
- return limit.rlim_cur;
-}
-
-
-uint64_t OS::TotalPhysicalMemory() {
-#if V8_OS_MACOSX
- int mib[2];
- mib[0] = CTL_HW;
- mib[1] = HW_MEMSIZE;
- int64_t size = 0;
- size_t len = sizeof(size);
- if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(size);
-#elif V8_OS_FREEBSD
- int pages, page_size;
- size_t size = sizeof(pages);
- sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
- sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
- if (pages == -1 || page_size == -1) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(pages) * page_size;
-#elif V8_OS_CYGWIN
- MEMORYSTATUS memory_info;
- memory_info.dwLength = sizeof(memory_info);
- if (!GlobalMemoryStatus(&memory_info)) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(memory_info.dwTotalPhys);
-#elif V8_OS_QNX
- struct stat stat_buf;
- if (stat("/proc", &stat_buf) != 0) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(stat_buf.st_size);
-#else
- intptr_t pages = sysconf(_SC_PHYS_PAGES);
- intptr_t page_size = sysconf(_SC_PAGESIZE);
- if (pages == -1 || page_size == -1) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(pages) * page_size;
-#endif
-}
-
-
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -293,11 +225,11 @@ void OS::DebugBreak() {
#elif V8_HOST_ARCH_MIPS64
asm("break");
#elif V8_HOST_ARCH_IA32
-#if defined(__native_client__)
+#if V8_OS_NACL
asm("hlt");
#else
asm("int $3");
-#endif // __native_client__
+#endif // V8_OS_NACL
#elif V8_HOST_ARCH_X64
asm("int $3");
#else
@@ -329,7 +261,7 @@ int OS::GetCurrentThreadId() {
// PNaCL doesn't have a way to get an integral thread ID, but it doesn't
// really matter, because we only need it in PerfJitLogger::LogRecordedBuffer.
return 0;
-#endif // defined(ANDROID)
+#endif
}
@@ -338,12 +270,17 @@ int OS::GetCurrentThreadId() {
//
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
+#if V8_OS_NACL
+ // Optionally used in Logger::ResourceEvent.
+ return -1;
+#else
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
*secs = usage.ru_utime.tv_sec;
*usecs = usage.ru_utime.tv_usec;
return 0;
+#endif
}
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 6f2f989723..2cb3228400 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -249,7 +249,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 7a54f7c486..b9ef465b3e 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -154,7 +154,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
diff --git a/deps/v8/test/base-unittests/platform/platform-unittest.cc b/deps/v8/src/base/platform/platform-unittest.cc
index 3530ff8073..06fbee0042 100644
--- a/deps/v8/test/base-unittests/platform/platform-unittest.cc
+++ b/deps/v8/src/base/platform/platform-unittest.cc
@@ -28,20 +28,15 @@ TEST(OS, GetCurrentProcessId) {
}
-TEST(OS, NumberOfProcessorsOnline) {
- EXPECT_GT(OS::NumberOfProcessorsOnline(), 0);
-}
-
-
namespace {
-class SelfJoinThread V8_FINAL : public Thread {
+class SelfJoinThread FINAL : public Thread {
public:
SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
- virtual void Run() V8_OVERRIDE { Join(); }
+ virtual void Run() OVERRIDE { Join(); }
};
-}
+} // namespace
TEST(Thread, SelfJoin) {
@@ -56,40 +51,40 @@ namespace {
class ThreadLocalStorageTest : public Thread, public ::testing::Test {
public:
ThreadLocalStorageTest() : Thread(Options("ThreadLocalStorageTest")) {
- for (size_t i = 0; i < ARRAY_SIZE(keys_); ++i) {
+ for (size_t i = 0; i < arraysize(keys_); ++i) {
keys_[i] = Thread::CreateThreadLocalKey();
}
}
~ThreadLocalStorageTest() {
- for (size_t i = 0; i < ARRAY_SIZE(keys_); ++i) {
+ for (size_t i = 0; i < arraysize(keys_); ++i) {
Thread::DeleteThreadLocalKey(keys_[i]);
}
}
- virtual void Run() V8_FINAL V8_OVERRIDE {
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
+ virtual void Run() FINAL OVERRIDE {
+ for (size_t i = 0; i < arraysize(keys_); i++) {
CHECK(!Thread::HasThreadLocal(keys_[i]));
}
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
+ for (size_t i = 0; i < arraysize(keys_); i++) {
Thread::SetThreadLocal(keys_[i], GetValue(i));
}
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
+ for (size_t i = 0; i < arraysize(keys_); i++) {
CHECK(Thread::HasThreadLocal(keys_[i]));
}
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
+ for (size_t i = 0; i < arraysize(keys_); i++) {
CHECK_EQ(GetValue(i), Thread::GetThreadLocal(keys_[i]));
CHECK_EQ(GetValue(i), Thread::GetExistingThreadLocal(keys_[i]));
}
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
- Thread::SetThreadLocal(keys_[i], GetValue(ARRAY_SIZE(keys_) - i - 1));
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ Thread::SetThreadLocal(keys_[i], GetValue(arraysize(keys_) - i - 1));
}
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
+ for (size_t i = 0; i < arraysize(keys_); i++) {
CHECK(Thread::HasThreadLocal(keys_[i]));
}
- for (size_t i = 0; i < ARRAY_SIZE(keys_); i++) {
- CHECK_EQ(GetValue(ARRAY_SIZE(keys_) - i - 1),
+ for (size_t i = 0; i < arraysize(keys_); i++) {
+ CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
Thread::GetThreadLocal(keys_[i]));
- CHECK_EQ(GetValue(ARRAY_SIZE(keys_) - i - 1),
+ CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
Thread::GetExistingThreadLocal(keys_[i]));
}
}
@@ -102,7 +97,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
Thread::LocalStorageKey keys_[256];
};
-}
+} // namespace
TEST_F(ThreadLocalStorageTest, DoTest) {
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 9f106785eb..10f89de680 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -21,6 +21,7 @@
#include "src/base/win32-headers.h"
+#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -113,11 +114,6 @@ bool g_hard_abort = false;
} // namespace
-intptr_t OS::MaxVirtualMemory() {
- return 0;
-}
-
-
class TimezoneCache {
public:
TimezoneCache() : initialized_(false) { }
@@ -705,7 +701,7 @@ static size_t GetPageSize() {
if (page_size == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
- page_size = RoundUpToPowerOf2(info.dwPageSize);
+ page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
}
return page_size;
}
@@ -790,7 +786,7 @@ void* OS::Allocate(const size_t requested,
if (mbase == NULL) return NULL;
- DCHECK(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
+ DCHECK((reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment()) == 0);
*allocated = msize;
return mbase;
@@ -1168,18 +1164,6 @@ void OS::SignalCodeMovingGC() {
}
-uint64_t OS::TotalPhysicalMemory() {
- MEMORYSTATUSEX memory_info;
- memory_info.dwLength = sizeof(memory_info);
- if (!GlobalMemoryStatusEx(&memory_info)) {
- UNREACHABLE();
- return 0;
- }
-
- return static_cast<uint64_t>(memory_info.ullTotalPhys);
-}
-
-
#else // __MINGW32__
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<OS::SharedLibraryAddress>();
@@ -1190,13 +1174,6 @@ void OS::SignalCodeMovingGC() { }
#endif // __MINGW32__
-int OS::NumberOfProcessorsOnline() {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-}
-
-
double OS::nan_value() {
#ifdef _MSC_VER
return std::numeric_limits<double>::quiet_NaN();
@@ -1228,7 +1205,7 @@ VirtualMemory::VirtualMemory(size_t size)
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
- DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size);
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 9567572d80..9e20c084c6 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -35,7 +35,6 @@ namespace std {
int signbit(double x);
}
# endif
-#include <alloca.h>
#endif
#if V8_OS_QNX
@@ -285,16 +284,6 @@ class OS {
// using --never-compact) if accurate profiling is desired.
static void SignalCodeMovingGC();
- // Returns the number of processors online.
- static int NumberOfProcessorsOnline();
-
- // The total amount of physical memory available on the current system.
- static uint64_t TotalPhysicalMemory();
-
- // Maximum size of the virtual memory. 0 means there is no artificial
- // limit.
- static intptr_t MaxVirtualMemory();
-
// Returns the double constant NAN
static double nan_value();
diff --git a/deps/v8/src/base/platform/semaphore-unittest.cc b/deps/v8/src/base/platform/semaphore-unittest.cc
new file mode 100644
index 0000000000..c68435f875
--- /dev/null
+++ b/deps/v8/src/base/platform/semaphore-unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+static const char kAlphabet[] = "XKOAD";
+static const size_t kAlphabetSize = sizeof(kAlphabet) - 1;
+static const size_t kBufferSize = 987; // GCD(buffer size, alphabet size) = 1
+static const size_t kDataSize = kBufferSize * kAlphabetSize * 10;
+
+
+class ProducerThread FINAL : public Thread {
+ public:
+ ProducerThread(char* buffer, Semaphore* free_space, Semaphore* used_space)
+ : Thread(Options("ProducerThread")),
+ buffer_(buffer),
+ free_space_(free_space),
+ used_space_(used_space) {}
+ virtual ~ProducerThread() {}
+
+ virtual void Run() OVERRIDE {
+ for (size_t n = 0; n < kDataSize; ++n) {
+ free_space_->Wait();
+ buffer_[n % kBufferSize] = kAlphabet[n % kAlphabetSize];
+ used_space_->Signal();
+ }
+ }
+
+ private:
+ char* buffer_;
+ Semaphore* const free_space_;
+ Semaphore* const used_space_;
+};
+
+
+class ConsumerThread FINAL : public Thread {
+ public:
+ ConsumerThread(const char* buffer, Semaphore* free_space,
+ Semaphore* used_space)
+ : Thread(Options("ConsumerThread")),
+ buffer_(buffer),
+ free_space_(free_space),
+ used_space_(used_space) {}
+ virtual ~ConsumerThread() {}
+
+ virtual void Run() OVERRIDE {
+ for (size_t n = 0; n < kDataSize; ++n) {
+ used_space_->Wait();
+ EXPECT_EQ(kAlphabet[n % kAlphabetSize], buffer_[n % kBufferSize]);
+ free_space_->Signal();
+ }
+ }
+
+ private:
+ const char* buffer_;
+ Semaphore* const free_space_;
+ Semaphore* const used_space_;
+};
+
+
+class WaitAndSignalThread FINAL : public Thread {
+ public:
+ explicit WaitAndSignalThread(Semaphore* semaphore)
+ : Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
+ virtual ~WaitAndSignalThread() {}
+
+ virtual void Run() OVERRIDE {
+ for (int n = 0; n < 100; ++n) {
+ semaphore_->Wait();
+ ASSERT_FALSE(semaphore_->WaitFor(TimeDelta::FromMicroseconds(1)));
+ semaphore_->Signal();
+ }
+ }
+
+ private:
+ Semaphore* const semaphore_;
+};
+
+} // namespace
+
+
+TEST(Semaphore, ProducerConsumer) {
+ char buffer[kBufferSize];
+ std::memset(buffer, 0, sizeof(buffer));
+ Semaphore free_space(kBufferSize);
+ Semaphore used_space(0);
+ ProducerThread producer_thread(buffer, &free_space, &used_space);
+ ConsumerThread consumer_thread(buffer, &free_space, &used_space);
+ producer_thread.Start();
+ consumer_thread.Start();
+ producer_thread.Join();
+ consumer_thread.Join();
+}
+
+
+TEST(Semaphore, WaitAndSignal) {
+ Semaphore semaphore(0);
+ WaitAndSignalThread t1(&semaphore);
+ WaitAndSignalThread t2(&semaphore);
+
+ t1.Start();
+ t2.Start();
+
+ // Make something available.
+ semaphore.Signal();
+
+ t1.Join();
+ t2.Join();
+
+ semaphore.Wait();
+
+ EXPECT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1)));
+}
+
+
+TEST(Semaphore, WaitFor) {
+ Semaphore semaphore(0);
+
+ // Semaphore not signalled - timeout.
+ ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+ ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+ ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+
+ // Semaphore signalled - no timeout.
+ semaphore.Signal();
+ ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+ semaphore.Signal();
+ ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+ semaphore.Signal();
+ ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index e11338fd55..0679c00d95 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -12,6 +12,7 @@
#include <errno.h>
#include "src/base/logging.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
namespace v8 {
@@ -106,6 +107,17 @@ void Semaphore::Wait() {
bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+#if V8_OS_NACL
+ // PNaCL doesn't support sem_timedwait, do ugly busy waiting.
+ ElapsedTimer timer;
+ timer.Start();
+ do {
+ int result = sem_trywait(&native_handle_);
+ if (result == 0) return true;
+ DCHECK(errno == EAGAIN || errno == EINTR);
+ } while (!timer.HasExpired(rel_time));
+ return false;
+#else
// Compute the time for end of timeout.
const Time time = Time::NowFromSystemTime() + rel_time;
const struct timespec ts = time.ToTimespec();
@@ -129,6 +141,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
DCHECK_EQ(-1, result);
DCHECK_EQ(EINTR, errno);
}
+#endif
}
#elif V8_OS_WIN
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index b3105e36f0..cbf8df2b7d 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -31,7 +31,7 @@ class TimeDelta;
// count reaches zero, threads waiting for the semaphore blocks until the
// count becomes non-zero.
-class Semaphore V8_FINAL {
+class Semaphore FINAL {
public:
explicit Semaphore(int count);
~Semaphore();
@@ -47,7 +47,7 @@ class Semaphore V8_FINAL {
// time has passed. If timeout happens the return value is false and the
// counter is unchanged. Otherwise the semaphore counter is decremented and
// true is returned.
- bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+ bool WaitFor(const TimeDelta& rel_time) WARN_UNUSED_RESULT;
#if V8_OS_MACOSX
typedef semaphore_t NativeHandle;
diff --git a/deps/v8/test/base-unittests/platform/time-unittest.cc b/deps/v8/src/base/platform/time-unittest.cc
index 409323a8d6..b3bfbab319 100644
--- a/deps/v8/test/base-unittests/platform/time-unittest.cc
+++ b/deps/v8/src/base/platform/time-unittest.cc
@@ -139,7 +139,7 @@ static void ResolutionTest(T (*Now)(), TimeDelta target_granularity) {
EXPECT_LE(delta, target_granularity);
}
-}
+} // namespace
TEST(Time, NowResolution) {
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 4d1bec2b25..d47ccaf803 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -146,7 +146,7 @@ struct timespec TimeDelta::ToTimespec() const {
// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
-class Clock V8_FINAL {
+class Clock FINAL {
public:
Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
@@ -393,7 +393,7 @@ class TickClock {
// (3) System time. The system time provides a low-resolution (typically 10ms
// to 55 milliseconds) time stamp but is comparatively less expensive to
// retrieve and more reliable.
-class HighResolutionTickClock V8_FINAL : public TickClock {
+class HighResolutionTickClock FINAL : public TickClock {
public:
explicit HighResolutionTickClock(int64_t ticks_per_second)
: ticks_per_second_(ticks_per_second) {
@@ -401,7 +401,7 @@ class HighResolutionTickClock V8_FINAL : public TickClock {
}
virtual ~HighResolutionTickClock() {}
- virtual int64_t Now() V8_OVERRIDE {
+ virtual int64_t Now() OVERRIDE {
LARGE_INTEGER now;
BOOL result = QueryPerformanceCounter(&now);
DCHECK(result);
@@ -419,7 +419,7 @@ class HighResolutionTickClock V8_FINAL : public TickClock {
return ticks + 1;
}
- virtual bool IsHighResolution() V8_OVERRIDE {
+ virtual bool IsHighResolution() OVERRIDE {
return true;
}
@@ -428,14 +428,14 @@ class HighResolutionTickClock V8_FINAL : public TickClock {
};
-class RolloverProtectedTickClock V8_FINAL : public TickClock {
+class RolloverProtectedTickClock FINAL : public TickClock {
public:
// We initialize rollover_ms_ to 1 to ensure that we will never
// return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
virtual ~RolloverProtectedTickClock() {}
- virtual int64_t Now() V8_OVERRIDE {
+ virtual int64_t Now() OVERRIDE {
LockGuard<Mutex> lock_guard(&mutex_);
// We use timeGetTime() to implement TimeTicks::Now(), which rolls over
// every ~49.7 days. We try to track rollover ourselves, which works if
@@ -454,7 +454,7 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
}
- virtual bool IsHighResolution() V8_OVERRIDE {
+ virtual bool IsHighResolution() OVERRIDE {
return false;
}
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index b348236ff1..9dfa47d4e5 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -30,7 +30,7 @@ class TimeTicks;
// This class represents a duration of time, internally represented in
// microseonds.
-class TimeDelta V8_FINAL {
+class TimeDelta FINAL {
public:
TimeDelta() : delta_(0) {}
@@ -158,7 +158,7 @@ class TimeDelta V8_FINAL {
// This class represents an absolute point in time, internally represented as
// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
-class Time V8_FINAL {
+class Time FINAL {
public:
static const int64_t kMillisecondsPerSecond = 1000;
static const int64_t kMicrosecondsPerMillisecond = 1000;
@@ -295,7 +295,7 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
// Time::Now() may actually decrease or jump). But note that TimeTicks may
// "stand still", for example if the computer suspended.
-class TimeTicks V8_FINAL {
+class TimeTicks FINAL {
public:
TimeTicks() : ticks_(0) {}
diff --git a/deps/v8/src/base/sys-info-unittest.cc b/deps/v8/src/base/sys-info-unittest.cc
new file mode 100644
index 0000000000..a760f941f6
--- /dev/null
+++ b/deps/v8/src/base/sys-info-unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_NACL
+#define DISABLE_ON_NACL(Name) DISABLED_##Name
+#else
+#define DISABLE_ON_NACL(Name) Name
+#endif
+
+namespace v8 {
+namespace base {
+
+TEST(SysInfoTest, NumberOfProcessors) {
+ EXPECT_LT(0, SysInfo::NumberOfProcessors());
+}
+
+
+TEST(SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory)) {
+ EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
+}
+
+
+TEST(SysInfoTest, AmountOfVirtualMemory) {
+ EXPECT_LE(0, SysInfo::AmountOfVirtualMemory());
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
new file mode 100644
index 0000000000..06c4f24eeb
--- /dev/null
+++ b/deps/v8/src/base/sys-info.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+
+#if V8_OS_POSIX
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if V8_OS_BSD
+#include <sys/sysctl.h>
+#endif
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+namespace v8 {
+namespace base {
+
+// static
+int SysInfo::NumberOfProcessors() {
+#if V8_OS_OPENBSD
+ int mib[2] = {CTL_HW, HW_NCPU};
+ int ncpu = 0;
+ size_t len = sizeof(ncpu);
+ if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
+ UNREACHABLE();
+ return 1;
+ }
+ return ncpu;
+#elif V8_OS_POSIX
+ long result = sysconf(_SC_NPROCESSORS_ONLN); // NOLINT(runtime/int)
+ if (result == -1) {
+ UNREACHABLE();
+ return 1;
+ }
+ return static_cast<int>(result);
+#elif V8_OS_WIN
+ SYSTEM_INFO system_info = {0};
+ ::GetNativeSystemInfo(&system_info);
+ return static_cast<int>(system_info.dwNumberOfProcessors);
+#endif
+}
+
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+#if V8_OS_MACOSX
+ int mib[2] = {CTL_HW, HW_MEMSIZE};
+ int64_t memsize = 0;
+ size_t len = sizeof(memsize);
+ if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return memsize;
+#elif V8_OS_FREEBSD
+ int pages, page_size;
+ size_t size = sizeof(pages);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<int64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN || V8_OS_WIN
+ MEMORYSTATUSEX memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatusEx(&memory_info)) {
+ UNREACHABLE();
+ return 0;
+ }
+ int64_t result = static_cast<int64_t>(memory_info.ullTotalPhys);
+ if (result < 0) result = std::numeric_limits<int64_t>::max();
+ return result;
+#elif V8_OS_QNX
+ struct stat stat_buf;
+ if (stat("/proc", &stat_buf) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<int64_t>(stat_buf.st_size);
+#elif V8_OS_NACL
+ // No support for _SC_PHYS_PAGES, assume 2GB.
+ return static_cast<int64_t>(1) << 31;
+#elif V8_OS_POSIX
+ long pages = sysconf(_SC_PHYS_PAGES); // NOLINT(runtime/int)
+ long page_size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<int64_t>(pages) * page_size;
+#endif
+}
+
+
+// static
+int64_t SysInfo::AmountOfVirtualMemory() {
+#if V8_OS_NACL || V8_OS_WIN
+ return 0;
+#elif V8_OS_POSIX
+ struct rlimit rlim;
+ int result = getrlimit(RLIMIT_DATA, &rlim);
+ if (result != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return (rlim.rlim_cur == RLIM_INFINITY) ? 0 : rlim.rlim_cur;
+#endif
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/sys-info.h b/deps/v8/src/base/sys-info.h
new file mode 100644
index 0000000000..d1658fc09d
--- /dev/null
+++ b/deps/v8/src/base/sys-info.h
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_SYS_INFO_H_
+#define V8_BASE_SYS_INFO_H_
+
+#include "include/v8stdint.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+class SysInfo FINAL {
+ public:
+ // Returns the number of logical processors/core on the current machine.
+ static int NumberOfProcessors();
+
+ // Returns the number of bytes of physical memory on the current machine.
+ static int64_t AmountOfPhysicalMemory();
+
+ // Returns the number of bytes of virtual memory of this process. A return
+ // value of zero means that there is no limit on the available virtual memory.
+ static int64_t AmountOfVirtualMemory();
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SYS_INFO_H_
diff --git a/deps/v8/test/base-unittests/utils/random-number-generator-unittest.cc b/deps/v8/src/base/utils/random-number-generator-unittest.cc
index 7c533db4f0..7c533db4f0 100644
--- a/deps/v8/test/base-unittests/utils/random-number-generator-unittest.cc
+++ b/deps/v8/src/base/utils/random-number-generator-unittest.cc
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index be79811171..a1ec9d7184 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -79,7 +79,7 @@ RandomNumberGenerator::RandomNumberGenerator() {
int RandomNumberGenerator::NextInt(int max) {
- DCHECK_LE(0, max);
+ DCHECK_LT(0, max);
// Fast path if max is a power of 2.
if (IS_POWER_OF_TWO(max)) {
@@ -125,6 +125,7 @@ int RandomNumberGenerator::Next(int bits) {
void RandomNumberGenerator::SetSeed(int64_t seed) {
+ initial_seed_ = seed;
seed_ = (seed ^ kMultiplier) & kMask;
}
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 5955d66597..479423d658 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -25,7 +25,7 @@ namespace base {
// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
-class RandomNumberGenerator V8_FINAL {
+class RandomNumberGenerator FINAL {
public:
// EntropySource is used as a callback function when V8 needs a source of
// entropy.
@@ -40,7 +40,7 @@ class RandomNumberGenerator V8_FINAL {
// that one int value is pseudorandomly generated and returned.
// All 2^32 possible integer values are produced with (approximately) equal
// probability.
- V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
+ V8_INLINE int NextInt() WARN_UNUSED_RESULT {
return Next(32);
}
@@ -50,14 +50,14 @@ class RandomNumberGenerator V8_FINAL {
// one int value in the specified range is pseudorandomly generated and
// returned. All max possible int values are produced with (approximately)
// equal probability.
- int NextInt(int max) V8_WARN_UNUSED_RESULT;
+ int NextInt(int max) WARN_UNUSED_RESULT;
// Returns the next pseudorandom, uniformly distributed boolean value from
// this random number generator's sequence. The general contract of
// |NextBoolean()| is that one boolean value is pseudorandomly generated and
// returned. The values true and false are produced with (approximately) equal
// probability.
- V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
+ V8_INLINE bool NextBool() WARN_UNUSED_RESULT {
return Next(1) != 0;
}
@@ -66,7 +66,7 @@ class RandomNumberGenerator V8_FINAL {
// The general contract of |NextDouble()| is that one double value, chosen
// (approximately) uniformly from the range 0.0 (inclusive) to 1.0
// (exclusive), is pseudorandomly generated and returned.
- double NextDouble() V8_WARN_UNUSED_RESULT;
+ double NextDouble() WARN_UNUSED_RESULT;
// Fills the elements of a specified array of bytes with random numbers.
void NextBytes(void* buffer, size_t buflen);
@@ -74,13 +74,16 @@ class RandomNumberGenerator V8_FINAL {
// Override the current ssed.
void SetSeed(int64_t seed);
+ int64_t initial_seed() const { return initial_seed_; }
+
private:
static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
static const int64_t kAddend = 0xb;
static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
- int Next(int bits) V8_WARN_UNUSED_RESULT;
+ int Next(int bits) WARN_UNUSED_RESULT;
+ int64_t initial_seed_;
int64_t seed_;
};
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index e6b88bb2ff..2d94abd417 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -75,5 +75,7 @@
#undef GetObject
#undef CreateSemaphore
#undef Yield
+#undef RotateRight32
+#undef RotateRight64
#endif // V8_BASE_WIN32_HEADERS_H_
diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/basic-block-profiler.cc
new file mode 100644
index 0000000000..ef68ac6228
--- /dev/null
+++ b/deps/v8/src/basic-block-profiler.cc
@@ -0,0 +1,112 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/basic-block-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+BasicBlockProfiler::Data::Data(size_t n_blocks)
+ : n_blocks_(n_blocks), block_ids_(n_blocks_, -1), counts_(n_blocks_, 0) {}
+
+
+BasicBlockProfiler::Data::~Data() {}
+
+
+static void InsertIntoString(OStringStream* os, std::string* string) {
+ string->insert(string->begin(), os->c_str(), &os->c_str()[os->size()]);
+}
+
+
+void BasicBlockProfiler::Data::SetCode(OStringStream* os) {
+ InsertIntoString(os, &code_);
+}
+
+
+void BasicBlockProfiler::Data::SetFunctionName(OStringStream* os) {
+ InsertIntoString(os, &function_name_);
+}
+
+
+void BasicBlockProfiler::Data::SetSchedule(OStringStream* os) {
+ InsertIntoString(os, &schedule_);
+}
+
+
+void BasicBlockProfiler::Data::SetBlockId(size_t offset, int block_id) {
+ DCHECK(offset < n_blocks_);
+ block_ids_[offset] = block_id;
+}
+
+
+uint32_t* BasicBlockProfiler::Data::GetCounterAddress(size_t offset) {
+ DCHECK(offset < n_blocks_);
+ return &counts_[offset];
+}
+
+
+void BasicBlockProfiler::Data::ResetCounts() {
+ for (size_t i = 0; i < n_blocks_; ++i) {
+ counts_[i] = 0;
+ }
+}
+
+
+BasicBlockProfiler::BasicBlockProfiler() {}
+
+
+BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
+ Data* data = new Data(n_blocks);
+ data_list_.push_back(data);
+ return data;
+}
+
+
+BasicBlockProfiler::~BasicBlockProfiler() {
+ for (DataList::iterator i = data_list_.begin(); i != data_list_.end(); ++i) {
+ delete (*i);
+ }
+}
+
+
+void BasicBlockProfiler::ResetCounts() {
+ for (DataList::iterator i = data_list_.begin(); i != data_list_.end(); ++i) {
+ (*i)->ResetCounts();
+ }
+}
+
+
+OStream& operator<<(OStream& os, const BasicBlockProfiler& p) {
+ os << "---- Start Profiling Data ----" << endl;
+ typedef BasicBlockProfiler::DataList::const_iterator iterator;
+ for (iterator i = p.data_list_.begin(); i != p.data_list_.end(); ++i) {
+ os << **i;
+ }
+ os << "---- End Profiling Data ----" << endl;
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const BasicBlockProfiler::Data& d) {
+ const char* name = "unknown function";
+ if (!d.function_name_.empty()) {
+ name = d.function_name_.c_str();
+ }
+ if (!d.schedule_.empty()) {
+ os << "schedule for " << name << endl;
+ os << d.schedule_.c_str() << endl;
+ }
+ os << "block counts for " << name << ":" << endl;
+ for (size_t i = 0; i < d.n_blocks_; ++i) {
+ os << "block " << d.block_ids_[i] << " : " << d.counts_[i] << endl;
+ }
+ os << endl;
+ if (!d.code_.empty()) {
+ os << d.code_.c_str() << endl;
+ }
+ return os;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h
new file mode 100644
index 0000000000..e625cd23b5
--- /dev/null
+++ b/deps/v8/src/basic-block-profiler.h
@@ -0,0 +1,73 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASIC_BLOCK_PROFILER_H_
+#define V8_BASIC_BLOCK_PROFILER_H_
+
+#include <list>
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class Schedule;
+class Graph;
+
+class BasicBlockProfiler {
+ public:
+ class Data {
+ public:
+ size_t n_blocks() const { return n_blocks_; }
+ const uint32_t* counts() const { return &counts_[0]; }
+
+ void SetCode(OStringStream* os);
+ void SetFunctionName(OStringStream* os);
+ void SetSchedule(OStringStream* os);
+ void SetBlockId(size_t offset, int block_id);
+ uint32_t* GetCounterAddress(size_t offset);
+
+ private:
+ friend class BasicBlockProfiler;
+ friend OStream& operator<<(OStream& os, const BasicBlockProfiler::Data& s);
+
+ explicit Data(size_t n_blocks);
+ ~Data();
+
+ void ResetCounts();
+
+ const size_t n_blocks_;
+ std::vector<int> block_ids_;
+ std::vector<uint32_t> counts_;
+ std::string function_name_;
+ std::string schedule_;
+ std::string code_;
+ DISALLOW_COPY_AND_ASSIGN(Data);
+ };
+
+ typedef std::list<Data*> DataList;
+
+ BasicBlockProfiler();
+ ~BasicBlockProfiler();
+
+ Data* NewData(size_t n_blocks);
+ void ResetCounts();
+
+ const DataList* data_list() { return &data_list_; }
+
+ private:
+ friend OStream& operator<<(OStream& os, const BasicBlockProfiler& s);
+
+ DataList data_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(BasicBlockProfiler);
+};
+
+OStream& operator<<(OStream& os, const BasicBlockProfiler& s);
+OStream& operator<<(OStream& os, const BasicBlockProfiler::Data& s);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASIC_BLOCK_PROFILER_H_
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 240be71918..250562a2aa 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -54,9 +54,9 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
source.start(),
source.length());
// We do not expect this to throw an exception. Change this if it does.
- Handle<String> source_code =
- isolate_->factory()->NewExternalStringFromAscii(
- resource).ToHandleChecked();
+ Handle<String> source_code = isolate_->factory()
+ ->NewExternalStringFromOneByte(resource)
+ .ToHandleChecked();
heap->natives_source_cache()->set(index, *source_code);
}
Handle<Object> cached_source(heap->natives_source_cache()->get(index),
@@ -99,10 +99,15 @@ void Bootstrapper::InitializeOncePerProcess() {
void Bootstrapper::TearDownExtensions() {
delete free_buffer_extension_;
+ free_buffer_extension_ = NULL;
delete gc_extension_;
+ gc_extension_ = NULL;
delete externalize_string_extension_;
+ externalize_string_extension_ = NULL;
delete statistics_extension_;
+ statistics_extension_ = NULL;
delete trigger_failure_extension_;
+ trigger_failure_extension_ = NULL;
}
@@ -121,7 +126,7 @@ char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down_ != NULL) {
int len = delete_these_non_arrays_on_tear_down_->length();
- DCHECK(len < 27); // Don't use this mechanism for unbounded allocations.
+ DCHECK(len < 28); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down_->at(i);
delete_these_non_arrays_on_tear_down_->at(i) = NULL;
@@ -478,12 +483,14 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
{ // --- O b j e c t ---
Handle<JSFunction> object_fun = factory->NewFunction(object_name);
+ int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
+ int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
Handle<Map> object_function_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ factory->NewMap(JS_OBJECT_TYPE, instance_size);
+ object_function_map->set_inobject_properties(unused);
JSFunction::SetInitialMap(object_fun, object_function_map,
isolate->factory()->null_value());
- object_function_map->set_unused_property_fields(
- JSObject::kInitialGlobalObjectUnusedPropertiesCount);
+ object_function_map->set_unused_property_fields(unused);
native_context()->set_object_function(*object_fun);
@@ -506,7 +513,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the empty function as the prototype for function ECMAScript
// 262 15.3.4.
Handle<String> empty_string =
- factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
+ factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
Handle<JSFunction> empty_function = factory->NewFunctionWithoutPrototype(
empty_string, code);
@@ -521,7 +528,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->set_map(*empty_function_map);
// --- E m p t y ---
- Handle<String> source = factory->NewStringFromStaticAscii("() {}");
+ Handle<String> source = factory->NewStringFromStaticChars("() {}");
Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
@@ -599,7 +606,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
Handle<JSFunction> Genesis::GetStrictPoisonFunction() {
if (strict_poison_function.is_null()) {
Handle<String> name = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ThrowTypeError"));
+ STATIC_CHAR_VECTOR("ThrowTypeError"));
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
strict_poison_function = factory()->NewFunctionWithoutPrototype(name, code);
@@ -615,7 +622,7 @@ Handle<JSFunction> Genesis::GetStrictPoisonFunction() {
Handle<JSFunction> Genesis::GetGeneratorPoisonFunction() {
if (generator_poison_function.is_null()) {
Handle<String> name = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ThrowTypeError"));
+ STATIC_CHAR_VECTOR("ThrowTypeError"));
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kGeneratorPoisonPill));
generator_poison_function = factory()->NewFunctionWithoutPrototype(
@@ -780,7 +787,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
name, code, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
#ifdef DEBUG
LookupIterator it(prototype, factory()->constructor_string(),
- LookupIterator::CHECK_OWN_REAL);
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> value = JSReceiver::GetProperty(&it).ToHandleChecked();
DCHECK(it.IsFound());
DCHECK_EQ(*isolate()->object_function(), *value);
@@ -821,8 +828,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
factory()->GlobalProxyType);
}
- Handle<String> global_name = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("global"));
+ Handle<String> global_name = factory()->global_string();
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
@@ -861,11 +867,8 @@ void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object) {
native_context()->set_security_token(*global_object);
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Runtime::DefineObjectProperty(builtins_global,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("global")),
- global_object,
- attributes).Assert();
+ Runtime::DefineObjectProperty(builtins_global, factory()->global_string(),
+ global_object, attributes).Assert();
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(*global_object)->set_builtins(*builtins_global);
TransferNamedProperties(global_object_from_snapshot, global_object);
@@ -1152,11 +1155,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{ // Set up the iterator result object
STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
Handle<JSFunction> object_function(native_context()->object_function());
- DCHECK(object_function->initial_map()->inobject_properties() == 0);
Handle<Map> iterator_result_map =
- Map::Create(object_function, JSGeneratorObject::kResultPropertyCount);
- DCHECK(iterator_result_map->inobject_properties() ==
- JSGeneratorObject::kResultPropertyCount);
+ Map::Create(isolate, JSGeneratorObject::kResultPropertyCount);
+ DCHECK_EQ(JSGeneratorObject::kResultSize,
+ iterator_result_map->instance_size());
+ DCHECK_EQ(JSGeneratorObject::kResultPropertyCount,
+ iterator_result_map->inobject_properties());
Map::EnsureDescriptorSlack(iterator_result_map,
JSGeneratorObject::kResultPropertyCount);
@@ -1171,6 +1175,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
iterator_result_map->AppendDescriptor(&done_descr);
iterator_result_map->set_unused_property_fields(0);
+ iterator_result_map->set_pre_allocated_property_fields(
+ JSGeneratorObject::kResultPropertyCount);
DCHECK_EQ(JSGeneratorObject::kResultSize,
iterator_result_map->instance_size());
native_context()->set_iterator_result_map(*iterator_result_map);
@@ -1187,8 +1193,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
- Handle<String> arguments_string = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("Arguments"));
+ Handle<String> arguments_string = factory->Arguments_string();
Handle<Code> code(isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> function = factory->NewFunctionWithoutPrototype(
arguments_string, code);
@@ -1209,6 +1214,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
+ // @@iterator method is added later.
map->set_function_with_prototype(true);
map->set_pre_allocated_property_fields(2);
@@ -1267,6 +1273,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
CallbacksDescriptor d(factory->caller_string(), caller, attributes);
map->AppendDescriptor(&d);
}
+ // @@iterator method is added later.
map->set_function_with_prototype(true);
map->set_prototype(native_context()->object_function()->prototype());
@@ -1293,7 +1300,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
JSObject::kHeaderSize);
Handle<String> name = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("context_extension"));
+ STATIC_CHAR_VECTOR("context_extension"));
context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -1351,70 +1358,16 @@ void Genesis::InstallTypedArray(
void Genesis::InitializeExperimentalGlobal() {
- // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
- // longer need to live behind flags, so functions get added to the snapshot.
-
- if (FLAG_harmony_generators) {
- // Create generator meta-objects and install them on the builtins object.
- Handle<JSObject> builtins(native_context()->builtins());
- Handle<JSObject> generator_object_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSFunction> generator_function_prototype = InstallFunction(
- builtins, "GeneratorFunctionPrototype", JS_FUNCTION_TYPE,
- JSFunction::kHeaderSize, generator_object_prototype,
- Builtins::kIllegal);
- InstallFunction(builtins, "GeneratorFunction",
- JS_FUNCTION_TYPE, JSFunction::kSize,
- generator_function_prototype, Builtins::kIllegal);
-
- // Create maps for generator functions and their prototypes. Store those
- // maps in the native context.
- Handle<Map> sloppy_function_map(native_context()->sloppy_function_map());
- Handle<Map> generator_function_map = Map::Copy(sloppy_function_map);
- generator_function_map->set_prototype(*generator_function_prototype);
- native_context()->set_sloppy_generator_function_map(
- *generator_function_map);
-
- // The "arguments" and "caller" instance properties aren't specified, so
- // technically we could leave them out. They make even less sense for
- // generators than for functions. Still, the same argument that it makes
- // sense to keep them around but poisoned in strict mode applies to
- // generators as well. With poisoned accessors, naive callers can still
- // iterate over the properties without accessing them.
- //
- // We can't use PoisonArgumentsAndCaller because that mutates accessor pairs
- // in place, and the initial state of the generator function map shares the
- // accessor pair with sloppy functions. Also the error message should be
- // different. Also unhappily, we can't use the API accessors to implement
- // poisoning, because API accessors present themselves as data properties,
- // not accessor properties, and so getOwnPropertyDescriptor raises an
- // exception as it tries to get the values. Sadness.
- Handle<AccessorPair> poison_pair(factory()->NewAccessorPair());
- PropertyAttributes rw_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Handle<JSFunction> poison_function = GetGeneratorPoisonFunction();
- poison_pair->set_getter(*poison_function);
- poison_pair->set_setter(*poison_function);
- ReplaceAccessors(generator_function_map, factory()->arguments_string(),
- rw_attribs, poison_pair);
- ReplaceAccessors(generator_function_map, factory()->caller_string(),
- rw_attribs, poison_pair);
-
- Handle<Map> strict_function_map(native_context()->strict_function_map());
- Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
- // "arguments" and "caller" already poisoned.
- strict_generator_function_map->set_prototype(*generator_function_prototype);
- native_context()->set_strict_generator_function_map(
- *strict_generator_function_map);
+ // TODO(erikcorry): Move this into Genesis::InitializeGlobal once we no
+ // longer need to live behind a flag.
+ Handle<JSObject> builtins(native_context()->builtins());
- Handle<JSFunction> object_function(native_context()->object_function());
- Handle<Map> generator_object_prototype_map = Map::Create(
- object_function, 0);
- generator_object_prototype_map->set_prototype(
- *generator_object_prototype);
- native_context()->set_generator_object_prototype_map(
- *generator_object_prototype_map);
- }
+ Handle<HeapObject> flag(
+ FLAG_harmony_regexps ? heap()->true_value() : heap()->false_value());
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
+ flag, attributes).Assert();
}
@@ -1431,9 +1384,8 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Factory* factory = isolate->factory();
Handle<String> source_code;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, source_code,
- factory->NewStringFromAscii(
- ExperimentalNatives::GetRawScriptSource(index)),
+ isolate, source_code, factory->NewStringFromAscii(
+ ExperimentalNatives::GetRawScriptSource(index)),
false);
return CompileNative(isolate, name, source_code);
}
@@ -1543,11 +1495,12 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = \
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
- Handle<Object> var##_native = Object::GetProperty( \
- handle(native_context()->builtins()), var##_name).ToHandleChecked(); \
+#define INSTALL_NATIVE(Type, name, var) \
+ Handle<String> var##_name = \
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR(name)); \
+ Handle<Object> var##_native = \
+ Object::GetProperty(handle(native_context()->builtins()), var##_name) \
+ .ToHandleChecked(); \
native_context()->set_##var(Type::cast(*var##_native));
#define INSTALL_NATIVE_MATH(name) \
@@ -1601,6 +1554,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol);
INSTALL_NATIVE(Symbol, "symbolUnscopables", unscopables_symbol);
+ INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
INSTALL_NATIVE_MATH(abs)
INSTALL_NATIVE_MATH(acos)
@@ -1693,7 +1647,7 @@ bool Genesis::InstallNatives() {
JSBuiltinsObject::kSize);
Handle<String> name =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
builtins_fun->shared()->set_instance_class_name(*name);
builtins_fun->initial_map()->set_dictionary_map(true);
builtins_fun->initial_map()->set_prototype(heap()->null_value());
@@ -1714,11 +1668,11 @@ bool Genesis::InstallNatives() {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_string =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("global"));
Handle<Object> global_obj(native_context()->global_object(), isolate());
JSObject::AddProperty(builtins, global_string, global_obj, attributes);
Handle<String> builtins_string =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
JSObject::AddProperty(builtins, builtins_string, builtins, attributes);
// Set up the reference from the global object to the builtins object.
@@ -1920,6 +1874,66 @@ bool Genesis::InstallNatives() {
map_iterator_function->initial_map());
}
+ {
+ // Create generator meta-objects and install them on the builtins object.
+ Handle<JSObject> builtins(native_context()->builtins());
+ Handle<JSObject> generator_object_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSFunction> generator_function_prototype =
+ InstallFunction(builtins, "GeneratorFunctionPrototype",
+ JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
+ generator_object_prototype, Builtins::kIllegal);
+ InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSize, generator_function_prototype,
+ Builtins::kIllegal);
+
+ // Create maps for generator functions and their prototypes. Store those
+ // maps in the native context.
+ Handle<Map> generator_function_map =
+ Map::Copy(sloppy_function_map_writable_prototype_);
+ generator_function_map->set_prototype(*generator_function_prototype);
+ native_context()->set_sloppy_generator_function_map(
+ *generator_function_map);
+
+ // The "arguments" and "caller" instance properties aren't specified, so
+ // technically we could leave them out. They make even less sense for
+ // generators than for functions. Still, the same argument that it makes
+ // sense to keep them around but poisoned in strict mode applies to
+ // generators as well. With poisoned accessors, naive callers can still
+ // iterate over the properties without accessing them.
+ //
+ // We can't use PoisonArgumentsAndCaller because that mutates accessor pairs
+ // in place, and the initial state of the generator function map shares the
+ // accessor pair with sloppy functions. Also the error message should be
+ // different. Also unhappily, we can't use the API accessors to implement
+ // poisoning, because API accessors present themselves as data properties,
+ // not accessor properties, and so getOwnPropertyDescriptor raises an
+ // exception as it tries to get the values. Sadness.
+ Handle<AccessorPair> poison_pair(factory()->NewAccessorPair());
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ Handle<JSFunction> poison_function = GetGeneratorPoisonFunction();
+ poison_pair->set_getter(*poison_function);
+ poison_pair->set_setter(*poison_function);
+ ReplaceAccessors(generator_function_map, factory()->arguments_string(),
+ rw_attribs, poison_pair);
+ ReplaceAccessors(generator_function_map, factory()->caller_string(),
+ rw_attribs, poison_pair);
+
+ Handle<Map> strict_function_map(native_context()->strict_function_map());
+ Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
+ // "arguments" and "caller" already poisoned.
+ strict_generator_function_map->set_prototype(*generator_function_prototype);
+ native_context()->set_strict_generator_function_map(
+ *strict_generator_function_map);
+
+ Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
+ generator_object_prototype_map->set_prototype(*generator_object_prototype);
+ native_context()->set_generator_object_prototype_map(
+ *generator_object_prototype_map);
+ }
+
if (FLAG_disable_native_files) {
PrintF("Warning: Running without installed natives!\n");
return true;
@@ -1947,7 +1961,8 @@ bool Genesis::InstallNatives() {
HeapObject::cast(string_function->initial_map()->prototype())->map());
// Install Function.prototype.call and apply.
- { Handle<String> key = factory()->function_class_string();
+ {
+ Handle<String> key = factory()->Function_string();
Handle<JSFunction> function =
Handle<JSFunction>::cast(Object::GetProperty(
handle(native_context()->global_object()), key).ToHandleChecked());
@@ -1964,7 +1979,8 @@ bool Genesis::InstallNatives() {
if (FLAG_vector_ics) {
// Apply embeds an IC, so we need a type vector of size 1 in the shared
// function info.
- Handle<FixedArray> feedback_vector = factory()->NewTypeFeedbackVector(1);
+ Handle<TypeFeedbackVector> feedback_vector =
+ factory()->NewTypeFeedbackVector(1);
apply->shared()->set_feedback_vector(*feedback_vector);
}
@@ -2043,6 +2059,34 @@ bool Genesis::InstallNatives() {
native_context()->set_regexp_result_map(*initial_map);
}
+ // Add @@iterator method to the arguments object maps.
+ {
+ PropertyAttributes attribs = DONT_ENUM;
+ Handle<AccessorInfo> arguments_iterator =
+ Accessors::ArgumentsIteratorInfo(isolate(), attribs);
+ {
+ CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
+ arguments_iterator, attribs);
+ Handle<Map> map(native_context()->sloppy_arguments_map());
+ Map::EnsureDescriptorSlack(map, 1);
+ map->AppendDescriptor(&d);
+ }
+ {
+ CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
+ arguments_iterator, attribs);
+ Handle<Map> map(native_context()->aliased_arguments_map());
+ Map::EnsureDescriptorSlack(map, 1);
+ map->AppendDescriptor(&d);
+ }
+ {
+ CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
+ arguments_iterator, attribs);
+ Handle<Map> map(native_context()->strict_arguments_map());
+ Map::EnsureDescriptorSlack(map, 1);
+ map->AppendDescriptor(&d);
+ }
+ }
+
#ifdef VERIFY_HEAP
builtins->ObjectVerify();
#endif
@@ -2064,9 +2108,9 @@ bool Genesis::InstallExperimentalNatives() {
i < ExperimentalNatives::GetBuiltinsCount();
i++) {
INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, classes, "harmony-classes.js")
}
InstallExperimentalNativeFunctions();
@@ -2172,7 +2216,7 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<JSObject> Error = Handle<JSObject>::cast(
Object::GetProperty(isolate, global, "Error").ToHandleChecked());
Handle<String> name =
- factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("stackTraceLimit"));
+ factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("stackTraceLimit"));
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
@@ -2180,20 +2224,20 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives =
factory->InternalizeUtf8String(FLAG_expose_natives_as);
+ uint32_t dummy_index;
+ if (natives->AsArrayIndex(&dummy_index)) return true;
JSObject::AddProperty(global, natives, handle(global->builtins()),
DONT_ENUM);
}
// Expose the stack trace symbol to native JS.
- RETURN_ON_EXCEPTION_VALUE(
- isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- handle(native_context->builtins(), isolate),
- factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("stack_trace_symbol")),
- factory->stack_trace_symbol(),
- NONE),
- false);
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ handle(native_context->builtins(), isolate),
+ factory->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("stack_trace_symbol")),
+ factory->stack_trace_symbol(), NONE),
+ false);
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
@@ -2208,6 +2252,8 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
debug_context->set_security_token(native_context->security_token());
Handle<String> debug_string =
factory->InternalizeUtf8String(FLAG_expose_debug_as);
+ uint32_t index;
+ if (debug_string->AsArrayIndex(&index)) return true;
Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
}
@@ -2326,8 +2372,9 @@ bool Genesis::InstallExtension(Isolate* isolate,
}
// We do not expect this to throw an exception. Change this if it does.
Handle<String> source_code =
- isolate->factory()->NewExternalStringFromAscii(
- extension->source()).ToHandleChecked();
+ isolate->factory()
+ ->NewExternalStringFromOneByte(extension->source())
+ .ToHandleChecked();
bool result = CompileScriptCached(isolate,
CStrVector(extension->name()),
source_code,
@@ -2449,11 +2496,11 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
break;
}
case CALLBACKS: {
- LookupResult result(isolate());
- Handle<Name> key(Name::cast(descs->GetKey(i)), isolate());
- to->LookupOwn(key, &result);
+ Handle<Name> key(descs->GetKey(i));
+ LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
// If the property is already there we skip it
- if (result.IsFound()) continue;
+ if (it.IsFound()) continue;
HandleScope inner(isolate());
DCHECK(!to->HasFastProperties());
// Add to dictionary.
@@ -2463,12 +2510,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
+ // Do not occur since the from object has fast properties.
case NORMAL:
- // Do not occur since the from object has fast properties.
- case HANDLER:
- case INTERCEPTOR:
- case NONEXISTENT:
- // No element in instance descriptors have proxy or interceptor type.
UNREACHABLE();
break;
}
@@ -2482,10 +2525,10 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (properties->IsKey(raw_key)) {
DCHECK(raw_key->IsName());
// If the property is already there we skip it.
- LookupResult result(isolate());
Handle<Name> key(Name::cast(raw_key));
- to->LookupOwn(key, &result);
- if (result.IsFound()) continue;
+ LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+ if (it.IsFound()) continue;
// Set the property.
Handle<Object> value = Handle<Object>(properties->ValueAt(i),
isolate());
@@ -2570,9 +2613,6 @@ Genesis::Genesis(Isolate* isolate,
active_(isolate->bootstrapper()) {
NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
- // If V8 cannot be initialized, just return.
- if (!V8::Initialize(NULL)) return;
-
// Before creating the roots we must save the context and restore it
// on all function exits.
SaveContext saved_context(isolate);
@@ -2625,9 +2665,9 @@ Genesis::Genesis(Isolate* isolate,
isolate->counters()->contexts_created_from_scratch()->Increment();
}
- // Initialize experimental globals and install experimental natives.
- InitializeExperimentalGlobal();
+ // Install experimental natives.
if (!InstallExperimentalNatives()) return;
+ InitializeExperimentalGlobal();
// We can't (de-)serialize typed arrays currently, but we are lucky: The state
// of the random number generator needs no initialization during snapshot
@@ -2648,14 +2688,12 @@ Genesis::Genesis(Isolate* isolate,
Utils::OpenHandle(*buffer)->set_should_be_freed(true);
v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems);
Handle<JSBuiltinsObject> builtins(native_context()->builtins());
- Runtime::DefineObjectProperty(builtins,
- factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("rngstate")),
- Utils::OpenHandle(*ta),
- NONE).Assert();
+ Runtime::DefineObjectProperty(builtins, factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("rngstate")),
+ Utils::OpenHandle(*ta), NONE).Assert();
// Initialize trigonometric lookup tables and constants.
- const int constants_size = ARRAY_SIZE(fdlibm::MathConstants::constants);
+ const int constants_size = arraysize(fdlibm::MathConstants::constants);
const int table_num_bytes = constants_size * kDoubleSize;
v8::Local<v8::ArrayBuffer> trig_buffer = v8::ArrayBuffer::New(
reinterpret_cast<v8::Isolate*>(isolate),
@@ -2665,7 +2703,7 @@ Genesis::Genesis(Isolate* isolate,
Runtime::DefineObjectProperty(
builtins,
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("kMath")),
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("kMath")),
Utils::OpenHandle(*trig_table), NONE).Assert();
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 1899d6a1e2..0cc84861b3 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -11,11 +11,11 @@ namespace v8 {
namespace internal {
// A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
+// (OneByteString*, JSFunction*), mapping names of native code files
// (runtime.js, etc.) to precompiled functions. Instead of mapping
// names to functions it might make sense to let the JS2C tool
// generate an index for each native JS file.
-class SourceCodeCache V8_FINAL BASE_EMBEDDED {
+class SourceCodeCache FINAL BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
@@ -24,7 +24,7 @@ class SourceCodeCache V8_FINAL BASE_EMBEDDED {
}
void Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+ v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
}
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
@@ -64,7 +64,7 @@ class SourceCodeCache V8_FINAL BASE_EMBEDDED {
// The Boostrapper is the public interface for creating a JavaScript global
// context.
-class Bootstrapper V8_FINAL {
+class Bootstrapper FINAL {
public:
static void InitializeOncePerProcess();
static void TearDownExtensions();
@@ -134,7 +134,7 @@ class Bootstrapper V8_FINAL {
};
-class BootstrapperActive V8_FINAL BASE_EMBEDDED {
+class BootstrapperActive FINAL BASE_EMBEDDED {
public:
explicit BootstrapperActive(Bootstrapper* bootstrapper)
: bootstrapper_(bootstrapper) {
@@ -152,14 +152,14 @@ class BootstrapperActive V8_FINAL BASE_EMBEDDED {
};
-class NativesExternalStringResource V8_FINAL
- : public v8::String::ExternalAsciiStringResource {
+class NativesExternalStringResource FINAL
+ : public v8::String::ExternalOneByteStringResource {
public:
NativesExternalStringResource(Bootstrapper* bootstrapper,
const char* source,
size_t length);
- virtual const char* data() const V8_OVERRIDE { return data_; }
- virtual size_t length() const V8_OVERRIDE { return length_; }
+ virtual const char* data() const OVERRIDE { return data_; }
+ virtual size_t length() const OVERRIDE { return length_; }
private:
const char* data_;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 498387353d..c52d22852d 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -13,9 +13,9 @@
#include "src/gdb-jit.h"
#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
-#include "src/ic-inl.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
#include "src/prototype.h"
-#include "src/stub-cache.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -987,12 +987,12 @@ BUILTIN(ArrayConcat) {
Handle<FixedArrayBase> storage(result_array->elements(), isolate);
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
for (int i = 0; i < n_arguments; i++) {
- // TODO(ishell): It is crucial to keep |array| as a raw pointer to avoid
- // performance degradation. Revisit this later.
+ // It is crucial to keep |array| in a raw pointer form to avoid performance
+ // degradation.
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
- ElementsKind from_kind = array->GetElementsKind();
if (len > 0) {
+ ElementsKind from_kind = array->GetElementsKind();
accessor->CopyElements(array, 0, from_kind, storage, j, len);
j += len;
}
@@ -1010,15 +1010,17 @@ BUILTIN(ArrayConcat) {
BUILTIN(StrictModePoisonPill) {
HandleScope scope(isolate);
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_poison_pill", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("strict_poison_pill", HandleVector<Object>(NULL, 0)));
}
BUILTIN(GeneratorPoisonPill) {
HandleScope scope(isolate);
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "generator_poison_pill", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("generator_poison_pill", HandleVector<Object>(NULL, 0)));
}
@@ -1115,10 +1117,9 @@ MUST_USE_RESULT static Object* HandleApiCallHelper(
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
- Handle<Object> obj =
- isolate->factory()->NewTypeError(
- "illegal_invocation", HandleVector(&function, 1));
- return isolate->Throw(*obj);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("illegal_invocation", HandleVector(&function, 1)));
}
Object* raw_call_data = fun_data->call_code();
@@ -1288,28 +1289,23 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
}
-static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
- KeyedLoadIC::GenerateIndexedInterceptor(masm);
+static void Generate_StoreIC_Miss(MacroAssembler* masm) {
+ StoreIC::GenerateMiss(masm);
}
-static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
- KeyedLoadIC::GenerateSloppyArguments(masm);
+static void Generate_StoreIC_Normal(MacroAssembler* masm) {
+ StoreIC::GenerateNormal(masm);
}
static void Generate_StoreIC_Slow(MacroAssembler* masm) {
- StoreIC::GenerateSlow(masm);
-}
-
-
-static void Generate_StoreIC_Miss(MacroAssembler* masm) {
- StoreIC::GenerateMiss(masm);
+ NamedStoreHandlerCompiler::GenerateSlow(masm);
}
-static void Generate_StoreIC_Normal(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
+static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+ ElementHandlerCompiler::GenerateStoreSlow(masm);
}
@@ -1333,11 +1329,6 @@ static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@@ -1571,7 +1562,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// Move the code into the object heap.
CodeDesc desc;
masm.GetCode(&desc);
- Code::Flags flags = functions[i].flags;
+ Code::Flags flags = functions[i].flags;
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm.CodeObject());
// Log the event and add the code to the builtins array.
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index a28dd01cfe..c1ed91df4e 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -63,105 +63,65 @@ enum BuiltinExtraArguments {
V(GeneratorPoisonPill, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(InOptimizationQueue, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CompileUnoptimized, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CompileOptimized, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
- kNoExtraICState) \
- V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
- kNoExtraICState) \
- V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
- kNoExtraICState) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, \
- kNoExtraICState) \
- \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- StoreIC::kStrictModeState) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
- kNoExtraICState) \
- V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
- kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- StoreIC::kStrictModeState) \
- V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- StoreIC::kStrictModeState) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
- StoreIC::kStrictModeState) \
- V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
- kNoExtraICState) \
- \
- /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
- V(FunctionCall, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- \
- V(StringConstructCode, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(InterruptCheck, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(StackCheck, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- \
- V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
+#define BUILTIN_LIST_A(V) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
+ V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
+ kNoExtraICState) \
+ V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState) \
+ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
+ \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kNoExtraICState) \
+ V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, kNoExtraICState) \
+ \
+ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+ StoreIC::kStrictModeState) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ StoreIC::kStrictModeState) \
+ V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
+ StoreIC::kStrictModeState) \
+ V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
+ kNoExtraICState) \
+ \
+ /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
+ V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(StringConstructCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
@@ -334,7 +294,7 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
- static void Generate_CompileUnoptimized(MacroAssembler* masm);
+ static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_InOptimizationQueue(MacroAssembler* masm);
static void Generate_CompileOptimized(MacroAssembler* masm);
static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index 5726c49606..dd9e3b4d3f 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -111,7 +111,7 @@ static const CachedPower kCachedPowers[] = {
};
#ifdef DEBUG
-static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+static const int kCachedPowersLength = arraysize(kCachedPowers);
#endif
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
new file mode 100644
index 0000000000..c969c8f1c1
--- /dev/null
+++ b/deps/v8/src/code-factory.cc
@@ -0,0 +1,92 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/code-factory.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
+ return Callable(
+ LoadIC::initialize_stub(isolate, LoadICState(mode).GetExtraICState()),
+ LoadDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
+ return Callable(isolate->builtins()->KeyedLoadIC_Initialize(),
+ LoadDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::StoreIC(Isolate* isolate, StrictMode mode) {
+ return Callable(StoreIC::initialize_stub(isolate, mode),
+ StoreDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::KeyedStoreIC(Isolate* isolate, StrictMode mode) {
+ Handle<Code> ic = mode == SLOPPY
+ ? isolate->builtins()->KeyedStoreIC_Initialize()
+ : isolate->builtins()->KeyedStoreIC_Initialize_Strict();
+ return Callable(ic, StoreDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
+ Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
+ return Callable(code, BinaryOpDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
+ OverwriteMode mode) {
+ BinaryOpICStub stub(isolate, op, mode);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::ToBoolean(Isolate* isolate,
+ ToBooleanStub::ResultMode mode,
+ ToBooleanStub::Types types) {
+ ToBooleanStub stub(isolate, mode, types);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::ToNumber(Isolate* isolate) {
+ ToNumberStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
+ PretenureFlag pretenure_flag) {
+ StringAddStub stub(isolate, flags, pretenure_flag);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::CallFunction(Isolate* isolate, int argc,
+ CallFunctionFlags flags) {
+ CallFunctionStub stub(isolate, argc, flags);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
new file mode 100644
index 0000000000..3add38486f
--- /dev/null
+++ b/deps/v8/src/code-factory.h
@@ -0,0 +1,61 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_FACTORY_H_
+#define V8_CODE_FACTORY_H_
+
+#include "src/allocation.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/globals.h"
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+// Associates a body of code with an interface descriptor.
+class Callable FINAL BASE_EMBEDDED {
+ public:
+ Callable(Handle<Code> code, CallInterfaceDescriptor descriptor)
+ : code_(code), descriptor_(descriptor) {}
+
+ Handle<Code> code() const { return code_; }
+ CallInterfaceDescriptor descriptor() const { return descriptor_; }
+
+ private:
+ const Handle<Code> code_;
+ const CallInterfaceDescriptor descriptor_;
+};
+
+
+class CodeFactory FINAL {
+ public:
+ // Initial states for ICs.
+ static Callable LoadIC(Isolate* isolate, ContextualMode mode);
+ static Callable KeyedLoadIC(Isolate* isolate);
+ static Callable StoreIC(Isolate* isolate, StrictMode mode);
+ static Callable KeyedStoreIC(Isolate* isolate, StrictMode mode);
+
+ static Callable CompareIC(Isolate* isolate, Token::Value op);
+
+ static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
+ OverwriteMode mode = NO_OVERWRITE);
+
+ // Code stubs. Add methods here as needed to reduce dependency on
+ // code-stubs.h.
+ static Callable ToBoolean(
+ Isolate* isolate, ToBooleanStub::ResultMode mode,
+ ToBooleanStub::Types types = ToBooleanStub::Types());
+
+ static Callable ToNumber(Isolate* isolate);
+
+ static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
+ PretenureFlag pretenure_flag);
+
+ static Callable CallFunction(Isolate* isolate, int argc,
+ CallFunctionFlags flags);
+};
+}
+}
+#endif // V8_CODE_FACTORY_H_
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 027517a835..dafef522f1 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/bailout-reason.h"
#include "src/code-stubs.h"
#include "src/field-index.h"
#include "src/hydrogen.h"
@@ -37,9 +38,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
: HGraphBuilder(&info_),
arguments_length_(NULL),
info_(stub, isolate),
+ descriptor_(stub),
context_(NULL) {
- descriptor_ = stub->GetInterfaceDescriptor();
- int parameter_count = descriptor_->GetEnvironmentParameterCount();
+ int parameter_count = descriptor_.GetEnvironmentParameterCount();
parameters_.Reset(new HParameter*[parameter_count]);
}
virtual bool BuildGraph();
@@ -47,7 +48,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
protected:
virtual HValue* BuildCodeStub() = 0;
HParameter* GetParameter(int parameter) {
- DCHECK(parameter < descriptor_->GetEnvironmentParameterCount());
+ DCHECK(parameter < descriptor_.GetEnvironmentParameterCount());
return parameters_[parameter];
}
HValue* GetArgumentsLength() {
@@ -71,6 +72,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
MULTIPLE
};
+ HValue* UnmappedCase(HValue* elements, HValue* key);
+
HValue* BuildArrayConstructor(ElementsKind kind,
AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class);
@@ -103,7 +106,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfoWithZone info_;
- CodeStubInterfaceDescriptor* descriptor_;
+ CodeStubDescriptor descriptor_;
HContext* context_;
};
@@ -119,22 +122,22 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
isolate()->GetHTracer()->TraceCompilation(&info_);
}
- int param_count = descriptor_->GetEnvironmentParameterCount();
+ int param_count = descriptor_.GetEnvironmentParameterCount();
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
Goto(next_block);
next_block->SetJoinId(BailoutId::StubEntry());
set_current_block(next_block);
- bool runtime_stack_params = descriptor_->stack_parameter_count().is_valid();
+ bool runtime_stack_params = descriptor_.stack_parameter_count().is_valid();
HInstruction* stack_parameter_count = NULL;
for (int i = 0; i < param_count; ++i) {
- Representation r = descriptor_->GetEnvironmentParameterRepresentation(i);
+ Representation r = descriptor_.GetEnvironmentParameterRepresentation(i);
HParameter* param = Add<HParameter>(i,
HParameter::REGISTER_PARAMETER, r);
start_environment->Bind(i, param);
parameters_[i] = param;
- if (descriptor_->IsEnvironmentParameterCountRegister(i)) {
+ if (descriptor_.IsEnvironmentParameterCountRegister(i)) {
param->set_type(HType::Smi());
stack_parameter_count = param;
arguments_length_ = stack_parameter_count;
@@ -159,16 +162,16 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
// We might have extra expressions to pop from the stack in addition to the
// arguments above.
HInstruction* stack_pop_count = stack_parameter_count;
- if (descriptor_->function_mode() == JS_FUNCTION_STUB_MODE) {
+ if (descriptor_.function_mode() == JS_FUNCTION_STUB_MODE) {
if (!stack_parameter_count->IsConstant() &&
- descriptor_->hint_stack_parameter_count() < 0) {
+ descriptor_.hint_stack_parameter_count() < 0) {
HInstruction* constant_one = graph()->GetConstant1();
stack_pop_count = AddUncasted<HAdd>(stack_parameter_count, constant_one);
stack_pop_count->ClearFlag(HValue::kCanOverflow);
// TODO(mvstanton): verify that stack_parameter_count+1 really fits in a
// smi.
} else {
- int count = descriptor_->hint_stack_parameter_count();
+ int count = descriptor_.hint_stack_parameter_count();
stack_pop_count = Add<HConstant>(count);
}
}
@@ -216,7 +219,8 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
};
-Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode() {
+Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
+ ExternalReference miss) {
Factory* factory = isolate()->factory();
// Generate the new code.
@@ -229,7 +233,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode() {
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
- GenerateLightweightMiss(&masm);
+ GenerateLightweightMiss(&masm, miss);
}
// Create the code object.
@@ -251,19 +255,14 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode() {
template <class Stub>
static Handle<Code> DoGenerateCode(Stub* stub) {
Isolate* isolate = stub->isolate();
- CodeStub::Major major_key = static_cast<CodeStub*>(stub)->MajorKey();
- CodeStubInterfaceDescriptor* descriptor =
- isolate->code_stub_interface_descriptor(major_key);
- if (!descriptor->IsInitialized()) {
- stub->InitializeInterfaceDescriptor(descriptor);
- }
+ CodeStubDescriptor descriptor(stub);
// If we are uninitialized we can use a light-weight stub to enter
// the runtime that is significantly faster than using the standard
// stub-failure deopt mechanism.
- if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
- DCHECK(!descriptor->stack_parameter_count().is_valid());
- return stub->GenerateLightweightMissCode();
+ if (stub->IsUninitialized() && descriptor.has_miss_handler()) {
+ DCHECK(!descriptor.stack_parameter_count().is_valid());
+ return stub->GenerateLightweightMissCode(descriptor.miss_handler());
}
base::ElapsedTimer timer;
if (FLAG_profile_hydrogen_code_stub_compilation) {
@@ -542,14 +541,10 @@ Handle<Code> CreateAllocationSiteStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
- GetParameter(KeyedLoadIC::kReceiverIndex),
- GetParameter(KeyedLoadIC::kNameIndex),
- NULL,
- casted_stub()->is_js_array(),
- casted_stub()->elements_kind(),
- LOAD,
- NEVER_RETURN_HOLE,
- STANDARD_STORE);
+ GetParameter(LoadDescriptor::kReceiverIndex),
+ GetParameter(LoadDescriptor::kNameIndex), NULL,
+ casted_stub()->is_js_array(), casted_stub()->elements_kind(), LOAD,
+ NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@@ -599,7 +594,7 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
HValue* descriptors =
Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), descriptors_access);
HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset(
- DescriptorArray::GetValueOffset(casted_stub()->descriptor()));
+ DescriptorArray::GetValueOffset(casted_stub()->constant_index()));
return Add<HLoadNamedField>(descriptors, static_cast<HValue*>(NULL),
value_access);
}
@@ -608,6 +603,122 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
+HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
+ HValue* result;
+ HInstruction* backing_store = Add<HLoadKeyed>(
+ elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
+ HValue* backing_store_length =
+ Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFixedArrayLength());
+ IfBuilder in_unmapped_range(this);
+ in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
+ Token::LT);
+ in_unmapped_range.Then();
+ {
+ result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
+ FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
+ }
+ in_unmapped_range.ElseDeopt("Outside of range");
+ in_unmapped_range.End();
+ return result;
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+
+ // Mapped arguments are actual arguments. Unmapped arguments are values added
+ // to the arguments object after it was created for the call. Mapped arguments
+ // are stored in the context at indexes given by elements[key + 2]. Unmapped
+ // arguments are stored as regular indexed properties in the arguments array,
+ // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
+ // look at argument object construction.
+ //
+ // The sloppy arguments elements array has a special format:
+ //
+ // 0: context
+ // 1: unmapped arguments array
+ // 2: mapped_index0,
+ // 3: mapped_index1,
+ // ...
+ //
+ // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
+ // If key + 2 >= elements.length then attempt to look in the unmapped
+ // arguments array (given by elements[1]) and return the value at key, missing
+ // to the runtime if the unmapped arguments array is not a fixed array or if
+ // key >= unmapped_arguments_array.length.
+ //
+ // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
+ // in the unmapped arguments array, as described above. Otherwise, t is a Smi
+ // index into the context array given at elements[0]. Return the value at
+ // context[t].
+
+ key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
+ IfBuilder positive_smi(this);
+ positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
+ Token::LT);
+ positive_smi.ThenDeopt("key is negative");
+ positive_smi.End();
+
+ HValue* constant_two = Add<HConstant>(2);
+ HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
+ HValue* elements_length =
+ Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFixedArrayLength());
+ HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
+ IfBuilder in_range(this);
+ in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
+ in_range.Then();
+ {
+ HValue* index = AddUncasted<HAdd>(key, constant_two);
+ HInstruction* mapped_index =
+ Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
+ FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ IfBuilder is_valid(this);
+ is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
+ graph()->GetConstantHole());
+ is_valid.Then();
+ {
+ // TODO(mvstanton): I'd like to assert from this point, that if the
+ // mapped_index is not the hole that it is indeed, a smi. An unnecessary
+ // smi check is being emitted.
+ HValue* the_context =
+ Add<HLoadKeyed>(elements, graph()->GetConstant0(),
+ static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
+ HValue* result =
+ Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ environment()->Push(result);
+ }
+ is_valid.Else();
+ {
+ HValue* result = UnmappedCase(elements, key);
+ environment()->Push(result);
+ }
+ is_valid.End();
+ }
+ in_range.Else();
+ {
+ HValue* result = UnmappedCase(elements, key);
+ environment()->Push(result);
+ }
+ in_range.End();
+
+ return environment()->Pop();
+}
+
+
+Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
void CodeStubGraphBuilderBase::BuildStoreNamedField(
HValue* object, HValue* value, FieldIndex index,
Representation representation) {
@@ -661,11 +772,11 @@ Handle<Code> StringLengthStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
- GetParameter(StoreIC::kReceiverIndex),
- GetParameter(StoreIC::kNameIndex),
- GetParameter(StoreIC::kValueIndex),
- casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+ GetParameter(StoreDescriptor::kReceiverIndex),
+ GetParameter(StoreDescriptor::kNameIndex),
+ GetParameter(StoreDescriptor::kValueIndex), casted_stub()->is_js_array(),
+ casted_stub()->elements_kind(), STORE, NEVER_RETURN_HOLE,
+ casted_stub()->store_mode());
return GetParameter(2);
}
@@ -913,7 +1024,7 @@ Handle<Code> CompareNilICStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
- BinaryOpIC::State state = casted_stub()->state();
+ BinaryOpICState state = casted_stub()->state();
HValue* left = GetParameter(BinaryOpICStub::kLeft);
HValue* right = GetParameter(BinaryOpICStub::kRight);
@@ -1012,7 +1123,7 @@ Handle<Code> BinaryOpICStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
- BinaryOpIC::State state = casted_stub()->state();
+ BinaryOpICState state = casted_stub()->state();
HValue* allocation_site = GetParameter(
BinaryOpWithAllocationSiteStub::kAllocationSite);
@@ -1067,7 +1178,7 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
HValue* true_value = NULL;
HValue* false_value = NULL;
- switch (stub->GetMode()) {
+ switch (stub->mode()) {
case ToBooleanStub::RESULT_AS_SMI:
true_value = graph()->GetConstant1();
false_value = graph()->GetConstant0();
@@ -1083,7 +1194,7 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
}
IfBuilder if_true(this);
- if_true.If<HBranch>(GetParameter(0), stub->GetTypes());
+ if_true.If<HBranch>(GetParameter(0), stub->types());
if_true.Then();
if_true.Return(true_value);
if_true.Else();
@@ -1100,12 +1211,11 @@ Handle<Code> ToBooleanStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub* stub = casted_stub();
- Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
Handle<Object> placeholer_value(Smi::FromInt(0), isolate());
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
- HParameter* value = GetParameter(StoreIC::kValueIndex);
+ HParameter* value = GetParameter(StoreDescriptor::kValueIndex);
if (stub->check_global()) {
// Check that the map of the global has not changed: use a placeholder map
@@ -1132,7 +1242,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
// property has been deleted and that the store must be handled by the
// runtime.
IfBuilder builder(this);
- HValue* hole_value = Add<HConstant>(hole);
+ HValue* hole_value = graph()->GetConstantHole();
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
builder.Deopt("Unexpected cell contents in global store");
@@ -1351,7 +1461,7 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
NOT_TENURED, JS_FUNCTION_TYPE);
int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
- casted_stub()->is_generator());
+ casted_stub()->kind());
// Compute the function map in the current native context and set that
// as the map of the allocated object.
@@ -1451,8 +1561,8 @@ Handle<Code> FastNewContextStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(KeyedLoadIC::kReceiverIndex);
- HValue* key = GetParameter(KeyedLoadIC::kNameIndex);
+ HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(LoadDescriptor::kNameIndex);
Add<HCheckSmi>(key);
@@ -1572,8 +1682,8 @@ void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildExternalElementLoad(
HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(KeyedLoadIC::kReceiverIndex);
- HValue* key = GetParameter(KeyedLoadIC::kNameIndex);
+ HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(LoadDescriptor::kNameIndex);
// Split into a smi/integer case and unique string case.
HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
@@ -1794,4 +1904,47 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
}
+template <>
+HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
+ Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
+ return receiver;
+}
+
+
+Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
+
+
+template <>
+HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
+ Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
+ return receiver;
+}
+
+
+Handle<Code> VectorKeyedLoadStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+Handle<Code> MegamorphicLoadStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
+ // The return address is on the stack.
+ HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+ HValue* name = GetParameter(LoadDescriptor::kNameIndex);
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ Add<HTailCallThroughMegamorphicCache>(receiver, name, flags);
+
+ // We never continue.
+ return graph()->GetConstant0();
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 0e68ab8a51..5c9e1a2b86 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -9,99 +9,59 @@
#include "src/cpu-profiler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
-InterfaceDescriptor::InterfaceDescriptor()
- : register_param_count_(-1) { }
+CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
+ : call_descriptor_(stub->GetCallInterfaceDescriptor()),
+ stack_parameter_count_(no_reg),
+ hint_stack_parameter_count_(-1),
+ function_mode_(NOT_JS_FUNCTION_STUB_MODE),
+ deoptimization_handler_(NULL),
+ handler_arguments_mode_(DONT_PASS_ARGUMENTS),
+ miss_handler_(),
+ has_miss_handler_(false) {
+ stub->InitializeDescriptor(this);
+}
-CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
+CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
: stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
deoptimization_handler_(NULL),
handler_arguments_mode_(DONT_PASS_ARGUMENTS),
miss_handler_(),
- has_miss_handler_(false) { }
-
-
-void InterfaceDescriptor::Initialize(
- int register_parameter_count,
- Register* registers,
- Representation* register_param_representations,
- PlatformInterfaceDescriptor* platform_descriptor) {
- platform_specific_descriptor_ = platform_descriptor;
- register_param_count_ = register_parameter_count;
-
- // An interface descriptor must have a context register.
- DCHECK(register_parameter_count > 0 && registers[0].is(ContextRegister()));
-
- // InterfaceDescriptor owns a copy of the registers array.
- register_params_.Reset(NewArray<Register>(register_parameter_count));
- for (int i = 0; i < register_parameter_count; i++) {
- register_params_[i] = registers[i];
- }
-
- // If a representations array is specified, then the descriptor owns that as
- // well.
- if (register_param_representations != NULL) {
- register_param_representations_.Reset(
- NewArray<Representation>(register_parameter_count));
- for (int i = 0; i < register_parameter_count; i++) {
- // If there is a context register, the representation must be tagged.
- DCHECK(i != 0 || register_param_representations[i].Equals(
- Representation::Tagged()));
- register_param_representations_[i] = register_param_representations[i];
- }
- }
+ has_miss_handler_(false) {
+ CodeStub::InitializeDescriptor(isolate, stub_key, this);
}
-void CodeStubInterfaceDescriptor::Initialize(
- CodeStub::Major major, int register_parameter_count, Register* registers,
- Address deoptimization_handler,
- Representation* register_param_representations,
- int hint_stack_parameter_count, StubFunctionMode function_mode) {
- InterfaceDescriptor::Initialize(register_parameter_count, registers,
- register_param_representations);
-
+void CodeStubDescriptor::Initialize(Address deoptimization_handler,
+ int hint_stack_parameter_count,
+ StubFunctionMode function_mode) {
deoptimization_handler_ = deoptimization_handler;
-
hint_stack_parameter_count_ = hint_stack_parameter_count;
function_mode_ = function_mode;
- major_ = major;
}
-void CodeStubInterfaceDescriptor::Initialize(
- CodeStub::Major major, int register_parameter_count, Register* registers,
- Register stack_parameter_count, Address deoptimization_handler,
- Representation* register_param_representations,
- int hint_stack_parameter_count, StubFunctionMode function_mode,
- HandlerArgumentsMode handler_mode) {
- Initialize(major, register_parameter_count, registers, deoptimization_handler,
- register_param_representations, hint_stack_parameter_count,
- function_mode);
+void CodeStubDescriptor::Initialize(Register stack_parameter_count,
+ Address deoptimization_handler,
+ int hint_stack_parameter_count,
+ StubFunctionMode function_mode,
+ HandlerArgumentsMode handler_mode) {
+ Initialize(deoptimization_handler, hint_stack_parameter_count, function_mode);
stack_parameter_count_ = stack_parameter_count;
handler_arguments_mode_ = handler_mode;
}
-void CallInterfaceDescriptor::Initialize(
- int register_parameter_count,
- Register* registers,
- Representation* param_representations,
- PlatformInterfaceDescriptor* platform_descriptor) {
- InterfaceDescriptor::Initialize(register_parameter_count, registers,
- param_representations, platform_descriptor);
-}
-
-
bool CodeStub::FindCodeInCache(Code** code_out) {
UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
@@ -175,9 +135,8 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Handle<Code> CodeStub::GetCode() {
Heap* heap = isolate()->heap();
Code* code;
- if (UseSpecialCache()
- ? FindCodeInSpecialCache(&code)
- : FindCodeInCache(&code)) {
+ if (UseSpecialCache() ? FindCodeInSpecialCache(&code)
+ : FindCodeInCache(&code)) {
DCHECK(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
@@ -229,15 +188,13 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
#define DEF_CASE(name) case name: return #name "Stub";
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
- case UninitializedMajorKey: return "<UninitializedMajorKey>Stub";
case NoCache:
return "<NoCache>Stub";
- default:
- if (!allow_unknown_keys) {
- UNREACHABLE();
- }
+ case NUMBER_OF_IDS:
+ UNREACHABLE();
return NULL;
}
+ return NULL;
}
@@ -252,6 +209,59 @@ void CodeStub::PrintName(OStream& os) const { // NOLINT
}
+void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
+ DispatchedCall call) {
+ switch (MajorKeyFromKey(key)) {
+#define DEF_CASE(NAME) \
+ case NAME: { \
+ NAME##Stub stub(key, isolate); \
+ CodeStub* pstub = &stub; \
+ call(pstub, value_out); \
+ break; \
+ }
+ CODE_STUB_LIST(DEF_CASE)
+#undef DEF_CASE
+ case NUMBER_OF_IDS:
+ UNREACHABLE();
+ case NoCache:
+ *value_out = NULL;
+ break;
+ }
+}
+
+
+static void InitializeDescriptorDispatchedCall(CodeStub* stub,
+ void** value_out) {
+ CodeStubDescriptor* descriptor_out =
+ reinterpret_cast<CodeStubDescriptor*>(value_out);
+ stub->InitializeDescriptor(descriptor_out);
+ descriptor_out->set_call_descriptor(stub->GetCallInterfaceDescriptor());
+}
+
+
+void CodeStub::InitializeDescriptor(Isolate* isolate, uint32_t key,
+ CodeStubDescriptor* desc) {
+ void** value_out = reinterpret_cast<void**>(desc);
+ Dispatch(isolate, key, value_out, &InitializeDescriptorDispatchedCall);
+}
+
+
+void CodeStub::GetCodeDispatchCall(CodeStub* stub, void** value_out) {
+ Handle<Code>* code_out = reinterpret_cast<Handle<Code>*>(value_out);
+ // Code stubs with special cache cannot be recreated from stub key.
+ *code_out = stub->UseSpecialCache() ? Handle<Code>() : stub->GetCode();
+}
+
+
+MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
+ HandleScope scope(isolate);
+ Handle<Code> code;
+ void** value_out = reinterpret_cast<void**>(&code);
+ Dispatch(isolate, key, value_out, &GetCodeDispatchCall);
+ return scope.CloseAndEscape(code);
+}
+
+
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
// Generate the uninitialized versions of the stub.
@@ -265,18 +275,18 @@ void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
}
// Generate special versions of the stub.
- BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
+ BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
void BinaryOpICStub::PrintState(OStream& os) const { // NOLINT
- os << state_;
+ os << state();
}
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
- const BinaryOpIC::State& state) {
+ const BinaryOpICState& state) {
BinaryOpICStub stub(isolate, state);
stub.GetCode();
}
@@ -285,19 +295,19 @@ void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
// static
void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
// Generate special versions of the stub.
- BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
+ BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
void BinaryOpICWithAllocationSiteStub::PrintState(
OStream& os) const { // NOLINT
- os << state_;
+ os << state();
}
// static
void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
- Isolate* isolate, const BinaryOpIC::State& state) {
+ Isolate* isolate, const BinaryOpICState& state) {
if (state.CouldCreateAllocationMementos()) {
BinaryOpICWithAllocationSiteStub stub(isolate, state);
stub.GetCode();
@@ -320,20 +330,20 @@ void StringAddStub::PrintBaseName(OStream& os) const { // NOLINT
}
-InlineCacheState ICCompareStub::GetICState() const {
- CompareIC::State state = Max(left_, right_);
+InlineCacheState CompareICStub::GetICState() const {
+ CompareICState::State state = Max(left(), right());
switch (state) {
- case CompareIC::UNINITIALIZED:
+ case CompareICState::UNINITIALIZED:
return ::v8::internal::UNINITIALIZED;
- case CompareIC::SMI:
- case CompareIC::NUMBER:
- case CompareIC::INTERNALIZED_STRING:
- case CompareIC::STRING:
- case CompareIC::UNIQUE_NAME:
- case CompareIC::OBJECT:
- case CompareIC::KNOWN_OBJECT:
+ case CompareICState::SMI:
+ case CompareICState::NUMBER:
+ case CompareICState::INTERNALIZED_STRING:
+ case CompareICState::STRING:
+ case CompareICState::UNIQUE_NAME:
+ case CompareICState::OBJECT:
+ case CompareICState::KNOWN_OBJECT:
return MONOMORPHIC;
- case CompareIC::GENERIC:
+ case CompareICState::GENERIC:
return ::v8::internal::GENERIC;
}
UNREACHABLE();
@@ -341,7 +351,12 @@ InlineCacheState ICCompareStub::GetICState() const {
}
-void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
+Condition CompareICStub::GetCondition() const {
+ return CompareIC::ComputeCondition(op());
+}
+
+
+void CompareICStub::AddToSpecialCache(Handle<Code> new_object) {
DCHECK(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
Factory* factory = isolate->factory();
@@ -353,12 +368,12 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
Factory* factory = isolate()->factory();
Code::Flags flags = Code::ComputeFlags(
GetCodeKind(),
UNINITIALIZED);
- DCHECK(op_ == Token::EQ || op_ == Token::EQ_STRICT);
+ DCHECK(op() == Token::EQ || op() == Token::EQ_STRICT);
Handle<Object> probe(
known_map_->FindInCodeCache(
strict() ?
@@ -369,10 +384,11 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
#ifdef DEBUG
- Token::Value cached_op;
- ICCompareStub::DecodeKey((*code_out)->stub_key(), NULL, NULL, NULL,
- &cached_op);
- DCHECK(op_ == cached_op);
+ CompareICStub decode((*code_out)->stub_key(), isolate());
+ DCHECK(op() == decode.op());
+ DCHECK(left() == decode.left());
+ DCHECK(right() == decode.right());
+ DCHECK(state() == decode.state());
#endif
return true;
}
@@ -380,65 +396,34 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
}
-int ICCompareStub::MinorKey() const {
- return OpField::encode(op_ - Token::EQ) |
- LeftStateField::encode(left_) |
- RightStateField::encode(right_) |
- HandlerStateField::encode(state_);
-}
-
-
-void ICCompareStub::DecodeKey(uint32_t stub_key, CompareIC::State* left_state,
- CompareIC::State* right_state,
- CompareIC::State* handler_state,
- Token::Value* op) {
- int minor_key = MinorKeyFromKey(stub_key);
- if (left_state) {
- *left_state =
- static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
- }
- if (right_state) {
- *right_state =
- static_cast<CompareIC::State>(RightStateField::decode(minor_key));
- }
- if (handler_state) {
- *handler_state =
- static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
- if (op) {
- *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
- }
-}
-
-
-void ICCompareStub::Generate(MacroAssembler* masm) {
- switch (state_) {
- case CompareIC::UNINITIALIZED:
+void CompareICStub::Generate(MacroAssembler* masm) {
+ switch (state()) {
+ case CompareICState::UNINITIALIZED:
GenerateMiss(masm);
break;
- case CompareIC::SMI:
+ case CompareICState::SMI:
GenerateSmis(masm);
break;
- case CompareIC::NUMBER:
+ case CompareICState::NUMBER:
GenerateNumbers(masm);
break;
- case CompareIC::STRING:
+ case CompareICState::STRING:
GenerateStrings(masm);
break;
- case CompareIC::INTERNALIZED_STRING:
+ case CompareICState::INTERNALIZED_STRING:
GenerateInternalizedStrings(masm);
break;
- case CompareIC::UNIQUE_NAME:
+ case CompareICState::UNIQUE_NAME:
GenerateUniqueNames(masm);
break;
- case CompareIC::OBJECT:
+ case CompareICState::OBJECT:
GenerateObjects(masm);
break;
- case CompareIC::KNOWN_OBJECT:
+ case CompareICState::KNOWN_OBJECT:
DCHECK(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
- case CompareIC::GENERIC:
+ case CompareICState::GENERIC:
GenerateGeneric(masm);
break;
}
@@ -446,24 +431,26 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
void CompareNilICStub::UpdateStatus(Handle<Object> object) {
- DCHECK(!state_.Contains(GENERIC));
- State old_state(state_);
+ State state = this->state();
+ DCHECK(!state.Contains(GENERIC));
+ State old_state = state;
if (object->IsNull()) {
- state_.Add(NULL_TYPE);
+ state.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
- state_.Add(UNDEFINED);
+ state.Add(UNDEFINED);
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
- state_.RemoveAll();
- state_.Add(GENERIC);
+ state.RemoveAll();
+ state.Add(GENERIC);
} else if (IsMonomorphic()) {
- state_.RemoveAll();
- state_.Add(GENERIC);
+ state.RemoveAll();
+ state.Add(GENERIC);
} else {
- state_.Add(MONOMORPHIC_MAP);
+ state.Add(MONOMORPHIC_MAP);
}
- TraceTransition(old_state, state_);
+ TraceTransition(old_state, state);
+ set_sub_minor_key(TypesBits::update(sub_minor_key(), state.ToIntegral()));
}
@@ -482,12 +469,12 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
void CompareNilICStub::PrintBaseName(OStream& os) const { // NOLINT
CodeStub::PrintBaseName(os);
- os << ((nil_value_ == kNullValue) ? "(NullValue)" : "(UndefinedValue)");
+ os << ((nil_value() == kNullValue) ? "(NullValue)" : "(UndefinedValue)");
}
void CompareNilICStub::PrintState(OStream& os) const { // NOLINT
- os << state_;
+ os << state();
}
@@ -524,18 +511,17 @@ OStream& operator<<(OStream& os, const CompareNilICStub::State& s) {
Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
- if (state_.Contains(CompareNilICStub::GENERIC)) {
- return Type::Any(zone);
- }
+ State state = this->state();
+ if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any(zone);
Type* result = Type::None(zone);
- if (state_.Contains(CompareNilICStub::UNDEFINED)) {
+ if (state.Contains(CompareNilICStub::UNDEFINED)) {
result = Type::Union(result, Type::Undefined(zone), zone);
}
- if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
+ if (state.Contains(CompareNilICStub::NULL_TYPE)) {
result = Type::Union(result, Type::Null(zone), zone);
}
- if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
+ if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
Type* type =
map.is_null() ? Type::Detectable(zone) : Type::Class(map, zone);
result = Type::Union(result, type, zone);
@@ -548,18 +534,18 @@ Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
Type* output_type = GetType(zone, map);
Type* nil_type =
- nil_value_ == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
+ nil_value() == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
return Type::Union(output_type, nil_type, zone);
}
void CallIC_ArrayStub::PrintState(OStream& os) const { // NOLINT
- os << state_ << " (Array)";
+ os << state() << " (Array)";
}
void CallICStub::PrintState(OStream& os) const { // NOLINT
- os << state_;
+ os << state();
}
@@ -579,91 +565,160 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void LoadFastElementStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- LoadIC::ReceiverRegister(),
- LoadIC::NameRegister() };
- STATIC_ASSERT(LoadIC::kParameterCount == 2);
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+void LoadFastElementStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
}
-void LoadDictionaryElementStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- LoadIC::ReceiverRegister(),
- LoadIC::NameRegister() };
- STATIC_ASSERT(LoadIC::kParameterCount == 2);
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+void LoadDictionaryElementStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
}
-void KeyedLoadGenericStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- LoadIC::ReceiverRegister(),
- LoadIC::NameRegister() };
- STATIC_ASSERT(LoadIC::kParameterCount == 2);
+void KeyedLoadGenericStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
}
-void HandlerStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- if (kind() == Code::LOAD_IC) {
- Register registers[] = {InterfaceDescriptor::ContextRegister(),
- LoadIC::ReceiverRegister(), LoadIC::NameRegister()};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ if (kind() == Code::STORE_IC) {
+ descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+ } else if (kind() == Code::KEYED_LOAD_IC) {
+ descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+ }
+}
+
+
+CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
+ if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+ return LoadDescriptor(isolate());
} else {
DCHECK_EQ(Code::STORE_IC, kind());
- Register registers[] = {InterfaceDescriptor::ContextRegister(),
- StoreIC::ReceiverRegister(),
- StoreIC::NameRegister(), StoreIC::ValueRegister()};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+ return StoreDescriptor(isolate());
}
}
-void StoreFastElementStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- KeyedStoreIC::ReceiverRegister(),
- KeyedStoreIC::NameRegister(),
- KeyedStoreIC::ValueRegister() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+void StoreFastElementStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
+}
+
+
+static void InitializeVectorLoadStub(Isolate* isolate,
+ CodeStubDescriptor* descriptor,
+ Address deoptimization_handler) {
+ DCHECK(FLAG_vector_ics);
+ descriptor->Initialize(deoptimization_handler);
}
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- ValueRegister(),
- MapRegister(),
- KeyRegister(),
- ObjectRegister() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
+void VectorLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ InitializeVectorLoadStub(isolate(), descriptor,
+ FUNCTION_ADDR(VectorLoadIC_MissFromStubFailure));
}
-void InstanceofStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- InstanceofStub::left(),
- InstanceofStub::right() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+void VectorKeyedLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ InitializeVectorLoadStub(
+ isolate(), descriptor,
+ FUNCTION_ADDR(VectorKeyedLoadIC_MissFromStubFailure));
}
-void LoadDictionaryElementPlatformStub::Generate(MacroAssembler* masm) {
- ElementHandlerCompiler::GenerateLoadDictionaryElement(masm);
+void MegamorphicLoadStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
+}
+
+
+void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void ToNumberStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ NumberToStringDescriptor call_descriptor(isolate());
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
+}
+
+
+void FastCloneShallowArrayStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ FastCloneShallowArrayDescriptor call_descriptor(isolate());
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry);
+}
+
+
+void FastCloneShallowObjectStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ FastCloneShallowObjectDescriptor call_descriptor(isolate());
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
+}
+
+
+void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void RegExpConstructResultStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
+}
+
+
+void TransitionElementsKindStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
+}
+
+
+void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(CompareNilIC_Miss));
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+}
+
+
+void ToBooleanStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(ToBooleanIC_Miss));
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+}
+
+
+void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(BinaryOpIC_Miss));
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+}
+
+
+void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
}
@@ -674,7 +729,7 @@ void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
void StoreElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind_) {
+ switch (elements_kind()) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -690,7 +745,7 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
- ElementHandlerCompiler::GenerateStoreDictionaryElement(masm);
+ ElementHandlerCompiler::GenerateStoreSlow(masm);
break;
case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -699,9 +754,27 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
}
+void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
+ switch (type()) {
+ case READ_ELEMENT:
+ GenerateReadElement(masm);
+ break;
+ case NEW_SLOPPY_FAST:
+ GenerateNewSloppyFast(masm);
+ break;
+ case NEW_SLOPPY_SLOW:
+ GenerateNewSloppySlow(masm);
+ break;
+ case NEW_STRICT:
+ GenerateNewStrict(masm);
+ break;
+ }
+}
+
+
void ArgumentsAccessStub::PrintName(OStream& os) const { // NOLINT
os << "ArgumentsAccessStub_";
- switch (type_) {
+ switch (type()) {
case READ_ELEMENT:
os << "ReadElement";
break;
@@ -720,7 +793,7 @@ void ArgumentsAccessStub::PrintName(OStream& os) const { // NOLINT
void CallFunctionStub::PrintName(OStream& os) const { // NOLINT
- os << "CallFunctionStub_Args" << argc_;
+ os << "CallFunctionStub_Args" << argc();
}
@@ -732,7 +805,7 @@ void CallConstructStub::PrintName(OStream& os) const { // NOLINT
void ArrayConstructorStub::PrintName(OStream& os) const { // NOLINT
os << "ArrayConstructorStub";
- switch (argument_count_) {
+ switch (argument_count()) {
case ANY:
os << "_Any";
break;
@@ -761,15 +834,17 @@ OStream& ArrayConstructorStubBase::BasePrintName(OStream& os, // NOLINT
bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
- Types old_types(types_);
- bool to_boolean_value = types_.UpdateStatus(object);
- TraceTransition(old_types, types_);
+ Types new_types = types();
+ Types old_types = new_types;
+ bool to_boolean_value = new_types.UpdateStatus(object);
+ TraceTransition(old_types, new_types);
+ set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToByte()));
return to_boolean_value;
}
void ToBooleanStub::PrintState(OStream& os) const { // NOLINT
- os << types_;
+ os << types();
}
@@ -856,97 +931,9 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
}
-static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) {
- int major_key = stub->MajorKey();
- CodeStubInterfaceDescriptor* descriptor =
- isolate->code_stub_interface_descriptor(major_key);
- if (!descriptor->IsInitialized()) {
- stub->InitializeInterfaceDescriptor(descriptor);
- }
-}
-
-
-void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
- ArrayNoArgumentConstructorStub stub1(isolate, GetInitialFastElementsKind());
- InstallDescriptor(isolate, &stub1);
- ArraySingleArgumentConstructorStub stub2(isolate,
- GetInitialFastElementsKind());
- InstallDescriptor(isolate, &stub2);
- ArrayNArgumentsConstructorStub stub3(isolate, GetInitialFastElementsKind());
- InstallDescriptor(isolate, &stub3);
-}
-
-
-void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
- NumberToStringStub stub(isolate);
- InstallDescriptor(isolate, &stub);
-}
-
-
-void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
- FastNewClosureStub stub(isolate, STRICT, false);
- InstallDescriptor(isolate, &stub);
-}
-
-
-void FastNewContextStub::InstallDescriptors(Isolate* isolate) {
- FastNewContextStub stub(isolate, FastNewContextStub::kMaximumSlots);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) {
- FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
- BinaryOpICStub stub(isolate, Token::ADD, NO_OVERWRITE);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void BinaryOpWithAllocationSiteStub::InstallDescriptors(Isolate* isolate) {
- BinaryOpWithAllocationSiteStub stub(isolate, Token::ADD, NO_OVERWRITE);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void StringAddStub::InstallDescriptors(Isolate* isolate) {
- StringAddStub stub(isolate, STRING_ADD_CHECK_NONE, NOT_TENURED);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void RegExpConstructResultStub::InstallDescriptors(Isolate* isolate) {
- RegExpConstructResultStub stub(isolate);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void KeyedLoadGenericStub::InstallDescriptors(Isolate* isolate) {
- KeyedLoadGenericStub stub(isolate);
- InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void StoreFieldStub::InstallDescriptors(Isolate* isolate) {
- StoreFieldStub stub(isolate, FieldIndex::ForInObjectOffset(0),
- Representation::None());
- InstallDescriptor(isolate, &stub);
-}
-
-
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
- : PlatformCodeStub(isolate), argument_count_(ANY) {
+ : PlatformCodeStub(isolate) {
+ minor_key_ = ArgumentCountBits::encode(ANY);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -955,11 +942,11 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
int argument_count)
: PlatformCodeStub(isolate) {
if (argument_count == 0) {
- argument_count_ = NONE;
+ minor_key_ = ArgumentCountBits::encode(NONE);
} else if (argument_count == 1) {
- argument_count_ = ONE;
+ minor_key_ = ArgumentCountBits::encode(ONE);
} else if (argument_count >= 2) {
- argument_count_ = MORE_THAN_ONE;
+ minor_key_ = ArgumentCountBits::encode(MORE_THAN_ONE);
} else {
UNREACHABLE();
}
@@ -967,15 +954,6 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
}
-void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
- InternalArrayNoArgumentConstructorStub stub1(isolate, FAST_ELEMENTS);
- InstallDescriptor(isolate, &stub1);
- InternalArraySingleArgumentConstructorStub stub2(isolate, FAST_ELEMENTS);
- InstallDescriptor(isolate, &stub2);
- InternalArrayNArgumentsConstructorStub stub3(isolate, FAST_ELEMENTS);
- InstallDescriptor(isolate, &stub3);
-}
-
InternalArrayConstructorStub::InternalArrayConstructorStub(
Isolate* isolate) : PlatformCodeStub(isolate) {
InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index c1d051b3d7..b127782c6e 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -9,6 +9,8 @@
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/globals.h"
+#include "src/ic/ic-state.h"
+#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
@@ -17,89 +19,91 @@ namespace internal {
// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- V(CallFunction) \
- V(CallConstruct) \
- V(BinaryOpIC) \
+ /* PlatformCodeStubs */ \
+ V(ArgumentsAccess) \
+ V(ArrayConstructor) \
V(BinaryOpICWithAllocationSite) \
- V(BinaryOpWithAllocationSite) \
- V(StringAdd) \
- V(SubString) \
- V(StringCompare) \
- V(Compare) \
- V(CompareIC) \
- V(CompareNilIC) \
- V(MathPow) \
+ V(CallApiFunction) \
+ V(CallApiGetter) \
+ V(CallConstruct) \
+ V(CallFunction) \
V(CallIC) \
V(CallIC_Array) \
+ V(CEntry) \
+ V(CompareIC) \
+ V(DoubleToI) \
V(FunctionPrototype) \
- V(RecordWrite) \
- V(StoreBufferOverflow) \
- V(RegExpExec) \
V(Instanceof) \
- V(ConvertToDouble) \
- V(WriteInt32ToHeapNumber) \
- V(StackCheck) \
- V(Interrupt) \
- V(FastNewClosure) \
- V(FastNewContext) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
- V(CreateAllocationSite) \
- V(ToBoolean) \
- V(ToNumber) \
- V(ArgumentsAccess) \
- V(RegExpConstructResult) \
- V(NumberToString) \
- V(DoubleToI) \
- V(CEntry) \
+ V(InternalArrayConstructor) \
V(JSEntry) \
- V(LoadElement) \
- V(KeyedLoadGeneric) \
+ V(KeyedLoadICTrampoline) \
+ V(LoadICTrampoline) \
+ V(LoadIndexedInterceptor) \
+ V(MathPow) \
+ V(ProfileEntryHook) \
+ V(RecordWrite) \
+ V(RegExpExec) \
+ V(StoreArrayLiteralElement) \
+ V(StoreBufferOverflow) \
+ V(StoreElement) \
+ V(StringCompare) \
+ V(StubFailureTrampoline) \
+ V(SubString) \
+ /* HydrogenCodeStubs */ \
+ V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
+ V(BinaryOpIC) \
+ V(BinaryOpWithAllocationSite) \
+ V(CompareNilIC) \
+ V(CreateAllocationSite) \
+ V(ElementsTransitionAndStore) \
+ V(FastCloneShallowArray) \
+ V(FastCloneShallowObject) \
+ V(FastNewClosure) \
+ V(FastNewContext) \
+ V(InternalArrayNArgumentsConstructor) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
- V(InternalArrayNArgumentsConstructor) \
- V(StoreElement) \
- V(DebuggerStatement) \
+ V(KeyedLoadGeneric) \
+ V(LoadDictionaryElement) \
+ V(LoadFastElement) \
+ V(MegamorphicLoad) \
V(NameDictionaryLookup) \
- V(ElementsTransitionAndStore) \
+ V(NumberToString) \
+ V(RegExpConstructResult) \
+ V(StoreFastElement) \
+ V(StringAdd) \
+ V(ToBoolean) \
+ V(ToNumber) \
V(TransitionElementsKind) \
- V(StoreArrayLiteralElement) \
- V(StubFailureTrampoline) \
- V(ArrayConstructor) \
- V(InternalArrayConstructor) \
- V(ProfileEntryHook) \
- V(StoreGlobal) \
- V(CallApiFunction) \
- V(CallApiGetter) \
+ V(VectorKeyedLoad) \
+ V(VectorLoad) \
/* IC Handler stubs */ \
+ V(LoadConstant) \
V(LoadField) \
+ V(KeyedLoadSloppyArguments) \
V(StoreField) \
- V(LoadConstant) \
+ V(StoreGlobal) \
V(StringLength)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V) \
- V(GetProperty) \
- V(SetProperty) \
- V(InvokeBuiltin) \
- V(DirectCEntry)
+#define CODE_STUB_LIST_ARM(V) \
+ V(DirectCEntry) \
+ V(WriteInt32ToHeapNumber)
+
#else
#define CODE_STUB_LIST_ARM(V)
#endif
// List of code stubs only used on ARM 64 bits platforms.
#if V8_TARGET_ARCH_ARM64
-#define CODE_STUB_LIST_ARM64(V) \
- V(GetProperty) \
- V(SetProperty) \
- V(InvokeBuiltin) \
- V(DirectCEntry) \
- V(StoreRegistersState) \
- V(RestoreRegistersState)
+#define CODE_STUB_LIST_ARM64(V) \
+ V(DirectCEntry) \
+ V(RestoreRegistersState) \
+ V(StoreRegistersState)
+
#else
#define CODE_STUB_LIST_ARM64(V)
#endif
@@ -107,16 +111,16 @@ namespace internal {
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
- V(RegExpCEntry) \
V(DirectCEntry) \
+ V(RestoreRegistersState) \
V(StoreRegistersState) \
- V(RestoreRegistersState)
+ V(WriteInt32ToHeapNumber)
#elif V8_TARGET_ARCH_MIPS64
#define CODE_STUB_LIST_MIPS(V) \
- V(RegExpCEntry) \
V(DirectCEntry) \
+ V(RestoreRegistersState) \
V(StoreRegistersState) \
- V(RestoreRegistersState)
+ V(WriteInt32ToHeapNumber)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
@@ -132,11 +136,12 @@ namespace internal {
class CodeStub BASE_EMBEDDED {
public:
enum Major {
- UninitializedMajorKey = 0,
+ // TODO(mvstanton): eliminate the NoCache key by getting rid
+ // of the non-monomorphic-cache.
    NoCache = 0,  // marker for stubs that do custom caching
#define DEF_ENUM(name) name,
CODE_STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
- NoCache, // marker for stubs that do custom caching
NUMBER_OF_IDS
};
@@ -149,7 +154,7 @@ class CodeStub BASE_EMBEDDED {
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
- static int MinorKeyFromKey(uint32_t key) {
+ static uint32_t MinorKeyFromKey(uint32_t key) {
return MinorKeyBits::decode(key);
}
@@ -162,7 +167,7 @@ class CodeStub BASE_EMBEDDED {
static const char* MajorName(Major major_key, bool allow_unknown_keys);
- explicit CodeStub(Isolate* isolate) : isolate_(isolate) { }
+ explicit CodeStub(Isolate* isolate) : minor_key_(0), isolate_(isolate) {}
virtual ~CodeStub() {}
static void GenerateStubsAheadOfTime(Isolate* isolate);
@@ -179,9 +184,18 @@ class CodeStub BASE_EMBEDDED {
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out);
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() = 0;
+
+ virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) {}
+
+ static void InitializeDescriptor(Isolate* isolate, uint32_t key,
+ CodeStubDescriptor* desc);
+
+ static MaybeHandle<Code> GetCode(Isolate* isolate, uint32_t key);
+
// Returns information for computing the number key.
virtual Major MajorKey() const = 0;
- virtual int MinorKey() const = 0;
+ uint32_t MinorKey() const { return minor_key_; }
virtual InlineCacheState GetICState() const { return UNINITIALIZED; }
virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
@@ -197,6 +211,9 @@ class CodeStub BASE_EMBEDDED {
Isolate* isolate() const { return isolate_; }
protected:
+ CodeStub(uint32_t key, Isolate* isolate)
+ : minor_key_(MinorKeyFromKey(key)), isolate_(isolate) {}
+
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode() = 0;
@@ -214,6 +231,8 @@ class CodeStub BASE_EMBEDDED {
return MinorKeyBits::encode(MinorKey()) | MajorKeyBits::encode(MajorKey());
}
+ uint32_t minor_key_;
+
private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
@@ -242,6 +261,14 @@ class CodeStub BASE_EMBEDDED {
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
+ // We use this dispatch to statically instantiate the correct code stub for
+ // the given stub key and call the passed function with that code stub.
+ typedef void (*DispatchedCall)(CodeStub* stub, void** value_out);
+ static void Dispatch(Isolate* isolate, uint32_t key, void** value_out,
+ DispatchedCall call);
+
+ static void GetCodeDispatchCall(CodeStub* stub, void** value_out);
+
STATIC_ASSERT(NUMBER_OF_IDS < (1 << kStubMajorKeyBits));
class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
class MinorKeyBits: public BitField<uint32_t,
@@ -253,111 +280,88 @@ class CodeStub BASE_EMBEDDED {
};
-class PlatformCodeStub : public CodeStub {
- public:
- explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) { }
-
- // Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual Code::Kind GetCodeKind() const { return Code::STUB; }
-
- protected:
- // Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
-};
-
-
-enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
-enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
+#define DEFINE_CODE_STUB_BASE(NAME, SUPER) \
+ public: \
+ NAME(uint32_t key, Isolate* isolate) : SUPER(key, isolate) {} \
+ \
+ private: \
+ DISALLOW_COPY_AND_ASSIGN(NAME)
-class PlatformInterfaceDescriptor;
+#define DEFINE_CODE_STUB(NAME, SUPER) \
+ protected: \
+ virtual inline Major MajorKey() const OVERRIDE { \
+ return NAME; \
+ }; \
+ DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
-class InterfaceDescriptor {
- public:
- bool IsInitialized() const { return register_param_count_ >= 0; }
+#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER) \
+ private: \
+ virtual void Generate(MacroAssembler* masm) OVERRIDE; \
+ DEFINE_CODE_STUB(NAME, SUPER)
- int GetEnvironmentLength() const { return register_param_count_; }
- int GetRegisterParameterCount() const { return register_param_count_; }
+#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER) \
+ public: \
+ virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE; \
+ virtual Handle<Code> GenerateCode() OVERRIDE; \
+ DEFINE_CODE_STUB(NAME, SUPER)
- Register GetParameterRegister(int index) const {
- return register_params_[index];
- }
-
- Representation GetParameterRepresentation(int index) const {
- DCHECK(index < register_param_count_);
- if (register_param_representations_.get() == NULL) {
- return Representation::Tagged();
- }
+#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
+ public: \
+ virtual Handle<Code> GenerateCode() OVERRIDE; \
+ DEFINE_CODE_STUB(NAME, SUPER)
- return register_param_representations_[index];
+#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
+ public: \
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
+ return NAME##Descriptor(isolate()); \
}
- // "Environment" versions of parameter functions. The first register
- // parameter (context) is not included.
- int GetEnvironmentParameterCount() const {
- return GetEnvironmentLength() - 1;
+// There are some code stubs we just can't describe right now with a
+// CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
+// An attempt to retrieve a descriptor will fail.
+#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR() \
+ public: \
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
+ UNREACHABLE(); \
+ return CallInterfaceDescriptor(); \
}
- Register GetEnvironmentParameterRegister(int index) const {
- return GetParameterRegister(index + 1);
- }
-
- Representation GetEnvironmentParameterRepresentation(int index) const {
- return GetParameterRepresentation(index + 1);
- }
- // Some platforms have extra information to associate with the descriptor.
- PlatformInterfaceDescriptor* platform_specific_descriptor() const {
- return platform_specific_descriptor_;
- }
+class PlatformCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode() OVERRIDE;
- static const Register ContextRegister();
+ virtual Code::Kind GetCodeKind() const { return Code::STUB; }
protected:
- InterfaceDescriptor();
- virtual ~InterfaceDescriptor() {}
-
- void Initialize(int register_parameter_count, Register* registers,
- Representation* register_param_representations,
- PlatformInterfaceDescriptor* platform_descriptor = NULL);
+ explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) {}
- private:
- int register_param_count_;
+ // Generates the assembler code for the stub.
+ virtual void Generate(MacroAssembler* masm) = 0;
- // The Register params are allocated dynamically by the
- // InterfaceDescriptor, and freed on destruction. This is because static
- // arrays of Registers cause creation of runtime static initializers
- // which we don't want.
- SmartArrayPointer<Register> register_params_;
- // Specifies Representations for the stub's parameter. Points to an array of
- // Representations of the same length of the numbers of parameters to the
- // stub, or if NULL (the default value), Representation of each parameter
- // assumed to be Tagged().
- SmartArrayPointer<Representation> register_param_representations_;
+ DEFINE_CODE_STUB_BASE(PlatformCodeStub, CodeStub);
+};
- PlatformInterfaceDescriptor* platform_specific_descriptor_;
- DISALLOW_COPY_AND_ASSIGN(InterfaceDescriptor);
-};
+enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
+enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
-class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
+class CodeStubDescriptor {
public:
- CodeStubInterfaceDescriptor();
+ explicit CodeStubDescriptor(CodeStub* stub);
+
+ CodeStubDescriptor(Isolate* isolate, uint32_t stub_key);
- void Initialize(CodeStub::Major major, int register_parameter_count,
- Register* registers, Address deoptimization_handler = NULL,
- Representation* register_param_representations = NULL,
+ void Initialize(Address deoptimization_handler = NULL,
int hint_stack_parameter_count = -1,
StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
- void Initialize(CodeStub::Major major, int register_parameter_count,
- Register* registers, Register stack_parameter_count,
+ void Initialize(Register stack_parameter_count,
Address deoptimization_handler = NULL,
- Representation* register_param_representations = NULL,
int hint_stack_parameter_count = -1,
StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE,
HandlerArgumentsMode handler_mode = DONT_PASS_ARGUMENTS);
@@ -370,6 +374,17 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
DCHECK(!stack_parameter_count_.is_valid());
}
+ void set_call_descriptor(CallInterfaceDescriptor d) { call_descriptor_ = d; }
+ CallInterfaceDescriptor call_descriptor() const { return call_descriptor_; }
+
+ int GetEnvironmentParameterCount() const {
+ return call_descriptor().GetEnvironmentParameterCount();
+ }
+
+ Representation GetEnvironmentParameterRepresentation(int index) const {
+ return call_descriptor().GetEnvironmentParameterRepresentation(index);
+ }
+
ExternalReference miss_handler() const {
DCHECK(has_miss_handler_);
return miss_handler_;
@@ -380,11 +395,12 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
}
bool IsEnvironmentParameterCountRegister(int index) const {
- return GetEnvironmentParameterRegister(index).is(stack_parameter_count_);
+ return call_descriptor().GetEnvironmentParameterRegister(index).is(
+ stack_parameter_count_);
}
int GetHandlerParameterCount() const {
- int params = GetEnvironmentParameterCount();
+ int params = call_descriptor().GetEnvironmentParameterCount();
if (handler_arguments_mode_ == PASS_ARGUMENTS) {
params += 1;
}
@@ -395,9 +411,9 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
Register stack_parameter_count() const { return stack_parameter_count_; }
StubFunctionMode function_mode() const { return function_mode_; }
Address deoptimization_handler() const { return deoptimization_handler_; }
- CodeStub::Major MajorKey() const { return major_; }
private:
+ CallInterfaceDescriptor call_descriptor_;
Register stack_parameter_count_;
// If hint_stack_parameter_count_ > 0, the code stub can optimize the
// return sequence. Default value is -1, which means it is ignored.
@@ -409,24 +425,6 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
ExternalReference miss_handler_;
bool has_miss_handler_;
- CodeStub::Major major_;
-};
-
-
-class CallInterfaceDescriptor: public InterfaceDescriptor {
- public:
- CallInterfaceDescriptor() { }
-
- // A copy of the passed in registers and param_representations is made
- // and owned by the CallInterfaceDescriptor.
-
- // TODO(mvstanton): Instead of taking parallel arrays register and
- // param_representations, how about a struct that puts the representation
- // and register side by side (eg, RegRep(r1, Representation::Tagged()).
- // The same should go for the CodeStubInterfaceDescriptor class.
- void Initialize(int register_parameter_count, Register* registers,
- Representation* param_representations,
- PlatformInterfaceDescriptor* platform_descriptor = NULL);
};
@@ -437,50 +435,46 @@ class HydrogenCodeStub : public CodeStub {
INITIALIZED
};
- explicit HydrogenCodeStub(Isolate* isolate,
- InitializationState state = INITIALIZED)
- : CodeStub(isolate) {
- is_uninitialized_ = (state == UNINITIALIZED);
- }
-
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
- CodeStubInterfaceDescriptor* GetInterfaceDescriptor() {
- return isolate()->code_stub_interface_descriptor(MajorKey());
- }
-
- bool IsUninitialized() { return is_uninitialized_; }
-
template<class SubClass>
static Handle<Code> GetUninitialized(Isolate* isolate) {
SubClass::GenerateAheadOfTime(isolate);
return SubClass().GetCode(isolate);
}
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) = 0;
-
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode() = 0;
- virtual int NotMissMinorKey() const = 0;
+ bool IsUninitialized() const { return IsMissBits::decode(minor_key_); }
- Handle<Code> GenerateLightweightMissCode();
+ Handle<Code> GenerateLightweightMissCode(ExternalReference miss);
template<class StateType>
void TraceTransition(StateType from, StateType to);
- private:
- class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
- class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
+ protected:
+ explicit HydrogenCodeStub(Isolate* isolate,
+ InitializationState state = INITIALIZED)
+ : CodeStub(isolate) {
+ minor_key_ = IsMissBits::encode(state == UNINITIALIZED);
+ }
- void GenerateLightweightMiss(MacroAssembler* masm);
- virtual int MinorKey() const {
- return IsMissBits::encode(is_uninitialized_) |
- MinorKeyBits::encode(NotMissMinorKey());
+ void set_sub_minor_key(uint32_t key) {
+ minor_key_ = SubMinorKeyBits::update(minor_key_, key);
}
- bool is_uninitialized_;
+ uint32_t sub_minor_key() const { return SubMinorKeyBits::decode(minor_key_); }
+
+ static const int kSubMinorKeyBits = kStubMinorKeyBits - 1;
+
+ private:
+ class IsMissBits : public BitField<bool, kSubMinorKeyBits, 1> {};
+ class SubMinorKeyBits : public BitField<int, 0, kSubMinorKeyBits> {};
+
+ void GenerateLightweightMiss(MacroAssembler* masm, ExternalReference miss);
+
+ DEFINE_CODE_STUB_BASE(HydrogenCodeStub, CodeStub);
};
@@ -552,103 +546,72 @@ class ToNumberStub: public HydrogenCodeStub {
public:
explicit ToNumberStub(Isolate* isolate) : HydrogenCodeStub(isolate) { }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate) {
- ToNumberStub stub(isolate);
- stub.InitializeInterfaceDescriptor(
- isolate->code_stub_interface_descriptor(CodeStub::ToNumber));
- }
-
- private:
- Major MajorKey() const { return ToNumber; }
- int NotMissMinorKey() const { return 0; }
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToNumber);
+ DEFINE_HYDROGEN_CODE_STUB(ToNumber, HydrogenCodeStub);
};
-class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
+class NumberToStringStub FINAL : public HydrogenCodeStub {
public:
explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate);
-
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kNumber = 0;
- private:
- virtual Major MajorKey() const V8_OVERRIDE { return NumberToString; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(NumberToString);
+ DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
};
class FastNewClosureStub : public HydrogenCodeStub {
public:
- FastNewClosureStub(Isolate* isolate,
- StrictMode strict_mode,
- bool is_generator)
- : HydrogenCodeStub(isolate),
- strict_mode_(strict_mode),
- is_generator_(is_generator) { }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ FastNewClosureStub(Isolate* isolate, StrictMode strict_mode,
+ FunctionKind kind)
+ : HydrogenCodeStub(isolate) {
+ DCHECK(IsValidFunctionKind(kind));
+ set_sub_minor_key(StrictModeBits::encode(strict_mode) |
+ FunctionKindBits::encode(kind));
+ }
- static void InstallDescriptors(Isolate* isolate);
+ StrictMode strict_mode() const {
+ return StrictModeBits::decode(sub_minor_key());
+ }
- StrictMode strict_mode() const { return strict_mode_; }
- bool is_generator() const { return is_generator_; }
+ FunctionKind kind() const {
+ return FunctionKindBits::decode(sub_minor_key());
+ }
+ bool is_arrow() const { return IsArrowFunction(kind()); }
+ bool is_generator() const { return IsGeneratorFunction(kind()); }
+ bool is_concise_method() const { return IsConciseMethod(kind()); }
private:
- class StrictModeBits: public BitField<bool, 0, 1> {};
- class IsGeneratorBits: public BitField<bool, 1, 1> {};
+ class StrictModeBits : public BitField<StrictMode, 0, 1> {};
+ class FunctionKindBits : public BitField<FunctionKind, 1, 3> {};
- Major MajorKey() const { return FastNewClosure; }
- int NotMissMinorKey() const {
- return StrictModeBits::encode(strict_mode_ == STRICT) |
- IsGeneratorBits::encode(is_generator_);
- }
-
- StrictMode strict_mode_;
- bool is_generator_;
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
+ DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
};
-class FastNewContextStub V8_FINAL : public HydrogenCodeStub {
+class FastNewContextStub FINAL : public HydrogenCodeStub {
public:
static const int kMaximumSlots = 64;
- FastNewContextStub(Isolate* isolate, int slots)
- : HydrogenCodeStub(isolate), slots_(slots) {
- DCHECK(slots_ > 0 && slots_ <= kMaximumSlots);
+ FastNewContextStub(Isolate* isolate, int slots) : HydrogenCodeStub(isolate) {
+ DCHECK(slots > 0 && slots <= kMaximumSlots);
+ set_sub_minor_key(SlotsBits::encode(slots));
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate);
-
- int slots() const { return slots_; }
-
- virtual Major MajorKey() const V8_OVERRIDE { return FastNewContext; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return slots_; }
+ int slots() const { return SlotsBits::decode(sub_minor_key()); }
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kFunction = 0;
private:
- int slots_;
+ class SlotsBits : public BitField<int, 0, 8> {};
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewContext);
+ DEFINE_HYDROGEN_CODE_STUB(FastNewContext, HydrogenCodeStub);
};
@@ -656,29 +619,19 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
FastCloneShallowArrayStub(Isolate* isolate,
AllocationSiteMode allocation_site_mode)
- : HydrogenCodeStub(isolate),
- allocation_site_mode_(allocation_site_mode) {}
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(AllocationSiteModeBits::encode(allocation_site_mode));
+ }
AllocationSiteMode allocation_site_mode() const {
- return allocation_site_mode_;
+ return AllocationSiteModeBits::decode(sub_minor_key());
}
- virtual Handle<Code> GenerateCode();
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate);
-
private:
- AllocationSiteMode allocation_site_mode_;
-
class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
- // Ensure data fits within available bits.
- Major MajorKey() const { return FastCloneShallowArray; }
- int NotMissMinorKey() const {
- return AllocationSiteModeBits::encode(allocation_site_mode_);
- }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowArray);
+ DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowArray, HydrogenCodeStub);
};
@@ -688,25 +641,19 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
static const int kMaximumClonedProperties = 6;
FastCloneShallowObjectStub(Isolate* isolate, int length)
- : HydrogenCodeStub(isolate), length_(length) {
- DCHECK_GE(length_, 0);
- DCHECK_LE(length_, kMaximumClonedProperties);
+ : HydrogenCodeStub(isolate) {
+ DCHECK_GE(length, 0);
+ DCHECK_LE(length, kMaximumClonedProperties);
+ set_sub_minor_key(LengthBits::encode(length));
}
- int length() const { return length_; }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ int length() const { return LengthBits::decode(sub_minor_key()); }
private:
- int length_;
+ class LengthBits : public BitField<int, 0, 4> {};
- Major MajorKey() const { return FastCloneShallowObject; }
- int NotMissMinorKey() const { return length_; }
-
- DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowObject);
+ DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowObject, HydrogenCodeStub);
};
@@ -715,18 +662,10 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
explicit CreateAllocationSiteStub(Isolate* isolate)
: HydrogenCodeStub(isolate) { }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
static void GenerateAheadOfTime(Isolate* isolate);
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
- Major MajorKey() const { return CreateAllocationSite; }
- int NotMissMinorKey() const { return 0; }
-
- DISALLOW_COPY_AND_ASSIGN(CreateAllocationSiteStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateAllocationSite);
+ DEFINE_HYDROGEN_CODE_STUB(CreateAllocationSite, HydrogenCodeStub);
};
@@ -739,36 +678,38 @@ class InstanceofStub: public PlatformCodeStub {
kReturnTrueFalseObject = 1 << 2
};
- InstanceofStub(Isolate* isolate, Flags flags)
- : PlatformCodeStub(isolate), flags_(flags) { }
-
- static Register left();
- static Register right();
+ InstanceofStub(Isolate* isolate, Flags flags) : PlatformCodeStub(isolate) {
+ minor_key_ = FlagBits::encode(flags);
+ }
- void Generate(MacroAssembler* masm);
+ static Register left() { return InstanceofDescriptor::left(); }
+ static Register right() { return InstanceofDescriptor::right(); }
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor);
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+ if (HasArgsInRegisters()) {
+ return InstanceofDescriptor(isolate());
+ }
+ return ContextOnlyDescriptor(isolate());
+ }
private:
- Major MajorKey() const { return Instanceof; }
- int MinorKey() const { return static_cast<int>(flags_); }
+ Flags flags() const { return FlagBits::decode(minor_key_); }
- bool HasArgsInRegisters() const {
- return (flags_ & kArgsInRegisters) != 0;
- }
+ bool HasArgsInRegisters() const { return (flags() & kArgsInRegisters) != 0; }
bool HasCallSiteInlineCheck() const {
- return (flags_ & kCallSiteInlineCheck) != 0;
+ return (flags() & kCallSiteInlineCheck) != 0;
}
bool ReturnTrueFalseObject() const {
- return (flags_ & kReturnTrueFalseObject) != 0;
+ return (flags() & kReturnTrueFalseObject) != 0;
}
- virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual void PrintName(OStream& os) const OVERRIDE; // NOLINT
+
+ class FlagBits : public BitField<Flags, 0, 3> {};
- Flags flags_;
+ DEFINE_PLATFORM_CODE_STUB(Instanceof, PlatformCodeStub);
};
@@ -782,20 +723,25 @@ enum AllocationSiteOverrideMode {
class ArrayConstructorStub: public PlatformCodeStub {
public:
enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
+
ArrayConstructorStub(Isolate* isolate, int argument_count);
- explicit ArrayConstructorStub(Isolate* isolate);
- void Generate(MacroAssembler* masm);
+ explicit ArrayConstructorStub(Isolate* isolate);
private:
+ ArgumentCountKey argument_count() const {
+ return ArgumentCountBits::decode(minor_key_);
+ }
+
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
- virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT
- virtual CodeStub::Major MajorKey() const { return ArrayConstructor; }
- virtual int MinorKey() const { return argument_count_; }
+ virtual void PrintName(OStream& os) const OVERRIDE; // NOLINT
+
+ class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
- ArgumentCountKey argument_count_;
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+ DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
};
@@ -803,13 +749,11 @@ class InternalArrayConstructorStub: public PlatformCodeStub {
public:
explicit InternalArrayConstructorStub(Isolate* isolate);
- void Generate(MacroAssembler* masm);
-
private:
- virtual CodeStub::Major MajorKey() const { return InternalArrayConstructor; }
- virtual int MinorKey() const { return 0; }
-
void GenerateCase(MacroAssembler* masm, ElementsKind kind);
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+ DEFINE_PLATFORM_CODE_STUB(InternalArrayConstructor, PlatformCodeStub);
};
@@ -818,71 +762,86 @@ class MathPowStub: public PlatformCodeStub {
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
MathPowStub(Isolate* isolate, ExponentType exponent_type)
- : PlatformCodeStub(isolate), exponent_type_(exponent_type) { }
- virtual void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = ExponentTypeBits::encode(exponent_type);
+ }
+
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+ if (exponent_type() == TAGGED) {
+ return MathPowTaggedDescriptor(isolate());
+ } else if (exponent_type() == INTEGER) {
+ return MathPowIntegerDescriptor(isolate());
+ }
+ // A CallInterfaceDescriptor doesn't specify double registers (yet).
+ return ContextOnlyDescriptor(isolate());
+ }
private:
- virtual CodeStub::Major MajorKey() const { return MathPow; }
- virtual int MinorKey() const { return exponent_type_; }
+ ExponentType exponent_type() const {
+ return ExponentTypeBits::decode(minor_key_);
+ }
- ExponentType exponent_type_;
+ class ExponentTypeBits : public BitField<ExponentType, 0, 2> {};
+
+ DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
};
class CallICStub: public PlatformCodeStub {
public:
- CallICStub(Isolate* isolate, const CallIC::State& state)
- : PlatformCodeStub(isolate), state_(state) {}
-
- bool CallAsMethod() const { return state_.CallAsMethod(); }
-
- int arg_count() const { return state_.arg_count(); }
+ CallICStub(Isolate* isolate, const CallICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
static int ExtractArgcFromMinorKey(int minor_key) {
- CallIC::State state((ExtraICState) minor_key);
+ CallICState state(static_cast<ExtraICState>(minor_key));
return state.arg_count();
}
- virtual void Generate(MacroAssembler* masm);
-
- virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
- return Code::CALL_IC;
- }
+ virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
- virtual InlineCacheState GetICState() const V8_OVERRIDE { return DEFAULT; }
+ virtual InlineCacheState GetICState() const OVERRIDE { return DEFAULT; }
- virtual ExtraICState GetExtraICState() const V8_FINAL V8_OVERRIDE {
- return state_.GetExtraICState();
+ virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+ return static_cast<ExtraICState>(minor_key_);
}
protected:
- virtual int MinorKey() const { return GetExtraICState(); }
- virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT
+ bool CallAsMethod() const {
+ return state().call_type() == CallICState::METHOD;
+ }
+
+ int arg_count() const { return state().arg_count(); }
- virtual CodeStub::Major MajorKey() const { return CallIC; }
+ CallICState state() const {
+ return CallICState(static_cast<ExtraICState>(minor_key_));
+ }
// Code generation helpers.
- void GenerateMiss(MacroAssembler* masm, IC::UtilityId id);
+ void GenerateMiss(MacroAssembler* masm);
- const CallIC::State state_;
+ private:
+ virtual void PrintState(OStream& os) const OVERRIDE; // NOLINT
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
+ DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
};
class CallIC_ArrayStub: public CallICStub {
public:
- CallIC_ArrayStub(Isolate* isolate, const CallIC::State& state_in)
+ CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
: CallICStub(isolate, state_in) {}
- virtual void Generate(MacroAssembler* masm);
-
- virtual InlineCacheState GetICState() const V8_FINAL V8_OVERRIDE {
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE {
return MONOMORPHIC;
}
- protected:
- virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT
+ private:
+ virtual void PrintState(OStream& os) const OVERRIDE; // NOLINT
- virtual CodeStub::Major MajorKey() const { return CallIC_Array; }
+ DEFINE_PLATFORM_CODE_STUB(CallIC_Array, CallICStub);
};
@@ -891,12 +850,28 @@ class FunctionPrototypeStub : public PlatformCodeStub {
public:
explicit FunctionPrototypeStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
- virtual void Generate(MacroAssembler* masm);
+
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
- private:
- virtual CodeStub::Major MajorKey() const { return FunctionPrototype; }
- virtual int MinorKey() const { return 0; }
+ // TODO(mvstanton): only the receiver register is accessed. When this is
+ // translated to a hydrogen code stub, a new CallInterfaceDescriptor
+ // should be created that just uses that register for more efficient code.
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_PLATFORM_CODE_STUB(FunctionPrototype, PlatformCodeStub);
+};
+
+
+// TODO(mvstanton): Translate to hydrogen code stub.
+class LoadIndexedInterceptorStub : public PlatformCodeStub {
+ public:
+ explicit LoadIndexedInterceptorStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
+ virtual Code::StubType GetStubType() { return Code::FAST; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
};
@@ -906,73 +881,87 @@ class HandlerStub : public HydrogenCodeStub {
virtual ExtraICState GetExtraICState() const { return kind(); }
virtual InlineCacheState GetICState() const { return MONOMORPHIC; }
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE;
+
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
protected:
- explicit HandlerStub(Isolate* isolate)
- : HydrogenCodeStub(isolate), bit_field_(0) {}
- virtual int NotMissMinorKey() const { return bit_field_; }
+ explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
virtual Code::Kind kind() const = 0;
- int bit_field_;
+
+ DEFINE_CODE_STUB_BASE(HandlerStub, HydrogenCodeStub);
};
class LoadFieldStub: public HandlerStub {
public:
- LoadFieldStub(Isolate* isolate, FieldIndex index)
- : HandlerStub(isolate), index_(index) {
- int property_index_key = index_.GetFieldAccessStubKey();
- bit_field_ = EncodedLoadFieldByIndexBits::encode(property_index_key);
+ LoadFieldStub(Isolate* isolate, FieldIndex index) : HandlerStub(isolate) {
+ int property_index_key = index.GetFieldAccessStubKey();
+ set_sub_minor_key(LoadFieldByIndexBits::encode(property_index_key));
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- FieldIndex index() const { return index_; }
+ FieldIndex index() const {
+ int property_index_key = LoadFieldByIndexBits::decode(sub_minor_key());
+ return FieldIndex::FromFieldAccessStubKey(property_index_key);
+ }
protected:
- explicit LoadFieldStub(Isolate* isolate);
virtual Code::Kind kind() const { return Code::LOAD_IC; }
virtual Code::StubType GetStubType() { return Code::FAST; }
private:
- class EncodedLoadFieldByIndexBits : public BitField<int, 0, 13> {};
- virtual CodeStub::Major MajorKey() const { return LoadField; }
- FieldIndex index_;
+ class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
+
+ DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
+};
+
+
+class KeyedLoadSloppyArgumentsStub : public HandlerStub {
+ public:
+ explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
+ : HandlerStub(isolate) {}
+
+ protected:
+ virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+ virtual Code::StubType GetStubType() { return Code::FAST; }
+
+ private:
+ DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
};
class LoadConstantStub : public HandlerStub {
public:
- LoadConstantStub(Isolate* isolate, int descriptor) : HandlerStub(isolate) {
- bit_field_ = descriptor;
+ LoadConstantStub(Isolate* isolate, int constant_index)
+ : HandlerStub(isolate) {
+ set_sub_minor_key(ConstantIndexBits::encode(constant_index));
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- int descriptor() const { return bit_field_; }
+ int constant_index() const {
+ return ConstantIndexBits::decode(sub_minor_key());
+ }
protected:
- explicit LoadConstantStub(Isolate* isolate);
virtual Code::Kind kind() const { return Code::LOAD_IC; }
virtual Code::StubType GetStubType() { return Code::FAST; }
private:
- virtual CodeStub::Major MajorKey() const { return LoadConstant; }
+ class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
+
+ DEFINE_HANDLER_CODE_STUB(LoadConstant, HandlerStub);
};
class StringLengthStub: public HandlerStub {
public:
explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
protected:
virtual Code::Kind kind() const { return Code::LOAD_IC; }
virtual Code::StubType GetStubType() { return Code::FAST; }
- private:
- virtual CodeStub::Major MajorKey() const { return StringLength; }
+ DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
};
@@ -980,30 +969,32 @@ class StoreFieldStub : public HandlerStub {
public:
StoreFieldStub(Isolate* isolate, FieldIndex index,
Representation representation)
- : HandlerStub(isolate), index_(index), representation_(representation) {
- int property_index_key = index_.GetFieldAccessStubKey();
- bit_field_ = EncodedStoreFieldByIndexBits::encode(property_index_key) |
- RepresentationBits::encode(
- PropertyDetails::EncodeRepresentation(representation));
+ : HandlerStub(isolate) {
+ int property_index_key = index.GetFieldAccessStubKey();
+ uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
+ set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
+ RepresentationBits::encode(repr));
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ FieldIndex index() const {
+ int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
+ return FieldIndex::FromFieldAccessStubKey(property_index_key);
+ }
- FieldIndex index() const { return index_; }
- Representation representation() { return representation_; }
- static void InstallDescriptors(Isolate* isolate);
+ Representation representation() {
+ uint8_t repr = RepresentationBits::decode(sub_minor_key());
+ return PropertyDetails::DecodeRepresentation(repr);
+ }
protected:
- explicit StoreFieldStub(Isolate* isolate);
virtual Code::Kind kind() const { return Code::STORE_IC; }
virtual Code::StubType GetStubType() { return Code::FAST; }
private:
- class EncodedStoreFieldByIndexBits : public BitField<int, 0, 13> {};
- class RepresentationBits : public BitField<int, 13, 4> {};
- virtual CodeStub::Major MajorKey() const { return StoreField; }
- FieldIndex index_;
- Representation representation_;
+ class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
+ class RepresentationBits : public BitField<uint8_t, 13, 4> {};
+
+ DEFINE_HANDLER_CODE_STUB(StoreField, HandlerStub);
};
@@ -1011,8 +1002,8 @@ class StoreGlobalStub : public HandlerStub {
public:
StoreGlobalStub(Isolate* isolate, bool is_constant, bool check_global)
: HandlerStub(isolate) {
- bit_field_ = IsConstantBits::encode(is_constant) |
- CheckGlobalBits::encode(check_global);
+ set_sub_minor_key(IsConstantBits::encode(is_constant) |
+ CheckGlobalBits::encode(check_global));
}
static Handle<HeapObject> global_placeholder(Isolate* isolate) {
@@ -1036,33 +1027,29 @@ class StoreGlobalStub : public HandlerStub {
virtual Code::Kind kind() const { return Code::STORE_IC; }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ bool is_constant() const { return IsConstantBits::decode(sub_minor_key()); }
+
+ bool check_global() const { return CheckGlobalBits::decode(sub_minor_key()); }
- bool is_constant() const {
- return IsConstantBits::decode(bit_field_);
- }
- bool check_global() const {
- return CheckGlobalBits::decode(bit_field_);
- }
void set_is_constant(bool value) {
- bit_field_ = IsConstantBits::update(bit_field_, value);
+ set_sub_minor_key(IsConstantBits::update(sub_minor_key(), value));
}
Representation representation() {
- return Representation::FromKind(RepresentationBits::decode(bit_field_));
+ return Representation::FromKind(
+ RepresentationBits::decode(sub_minor_key()));
}
+
void set_representation(Representation r) {
- bit_field_ = RepresentationBits::update(bit_field_, r.kind());
+ set_sub_minor_key(RepresentationBits::update(sub_minor_key(), r.kind()));
}
private:
- Major MajorKey() const { return StoreGlobal; }
-
class IsConstantBits: public BitField<bool, 0, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
class CheckGlobalBits: public BitField<bool, 9, 1> {};
- DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
+ DEFINE_HANDLER_CODE_STUB(StoreGlobal, HandlerStub);
};
@@ -1072,25 +1059,26 @@ class CallApiFunctionStub : public PlatformCodeStub {
bool is_store,
bool call_data_undefined,
int argc) : PlatformCodeStub(isolate) {
- bit_field_ =
- IsStoreBits::encode(is_store) |
- CallDataUndefinedBits::encode(call_data_undefined) |
- ArgumentBits::encode(argc);
+ minor_key_ = IsStoreBits::encode(is_store) |
+ CallDataUndefinedBits::encode(call_data_undefined) |
+ ArgumentBits::encode(argc);
DCHECK(!is_store || argc == 1);
}
private:
- virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
- virtual Major MajorKey() const V8_OVERRIDE { return CallApiFunction; }
- virtual int MinorKey() const V8_OVERRIDE { return bit_field_; }
+ bool is_store() const { return IsStoreBits::decode(minor_key_); }
+ bool call_data_undefined() const {
+ return CallDataUndefinedBits::decode(minor_key_);
+ }
+ int argc() const { return ArgumentBits::decode(minor_key_); }
class IsStoreBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
+ STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
- int bit_field_;
-
- DISALLOW_COPY_AND_ASSIGN(CallApiFunctionStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
+ DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
};
@@ -1098,12 +1086,8 @@ class CallApiGetterStub : public PlatformCodeStub {
public:
explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- private:
- virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
- virtual Major MajorKey() const V8_OVERRIDE { return CallApiGetter; }
- virtual int MinorKey() const V8_OVERRIDE { return 0; }
-
- DISALLOW_COPY_AND_ASSIGN(CallApiGetterStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiGetter);
+ DEFINE_PLATFORM_CODE_STUB(CallApiGetter, PlatformCodeStub);
};
@@ -1111,62 +1095,58 @@ class BinaryOpICStub : public HydrogenCodeStub {
public:
BinaryOpICStub(Isolate* isolate, Token::Value op,
OverwriteMode mode = NO_OVERWRITE)
- : HydrogenCodeStub(isolate, UNINITIALIZED), state_(isolate, op, mode) {}
+ : HydrogenCodeStub(isolate, UNINITIALIZED) {
+ BinaryOpICState state(isolate, op, mode);
+ set_sub_minor_key(state.GetExtraICState());
+ }
- explicit BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
- : HydrogenCodeStub(isolate), state_(state) {}
+ BinaryOpICStub(Isolate* isolate, const BinaryOpICState& state)
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(state.GetExtraICState());
+ }
static void GenerateAheadOfTime(Isolate* isolate);
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate);
-
- virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::BINARY_OP_IC;
}
- virtual InlineCacheState GetICState() const V8_FINAL V8_OVERRIDE {
- return state_.GetICState();
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+ return state().GetICState();
}
- virtual ExtraICState GetExtraICState() const V8_FINAL V8_OVERRIDE {
- return state_.GetExtraICState();
+ virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+ return static_cast<ExtraICState>(sub_minor_key());
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- const BinaryOpIC::State& state() const { return state_; }
-
- virtual void PrintState(OStream& os) const V8_FINAL V8_OVERRIDE; // NOLINT
-
- virtual Major MajorKey() const V8_OVERRIDE { return BinaryOpIC; }
- virtual int NotMissMinorKey() const V8_FINAL V8_OVERRIDE {
- return GetExtraICState();
+ BinaryOpICState state() const {
+ return BinaryOpICState(isolate(), GetExtraICState());
}
+ virtual void PrintState(OStream& os) const FINAL OVERRIDE; // NOLINT
+
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kLeft = 0;
static const int kRight = 1;
private:
static void GenerateAheadOfTime(Isolate* isolate,
- const BinaryOpIC::State& state);
-
- BinaryOpIC::State state_;
+ const BinaryOpICState& state);
- DISALLOW_COPY_AND_ASSIGN(BinaryOpICStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_HYDROGEN_CODE_STUB(BinaryOpIC, HydrogenCodeStub);
};
// TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
// call support for stubs in Hydrogen.
-class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
+class BinaryOpICWithAllocationSiteStub FINAL : public PlatformCodeStub {
public:
BinaryOpICWithAllocationSiteStub(Isolate* isolate,
- const BinaryOpIC::State& state)
- : PlatformCodeStub(isolate), state_(state) {}
+ const BinaryOpICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
static void GenerateAheadOfTime(Isolate* isolate);
@@ -1176,67 +1156,54 @@ class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
return CodeStub::GetCodeCopy(pattern);
}
- virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::BINARY_OP_IC;
}
- virtual InlineCacheState GetICState() const V8_OVERRIDE {
- return state_.GetICState();
+ virtual InlineCacheState GetICState() const OVERRIDE {
+ return state().GetICState();
}
- virtual ExtraICState GetExtraICState() const V8_OVERRIDE {
- return state_.GetExtraICState();
+ virtual ExtraICState GetExtraICState() const OVERRIDE {
+ return static_cast<ExtraICState>(minor_key_);
}
- virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
-
- virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual void PrintState(OStream& os) const OVERRIDE; // NOLINT
- virtual Major MajorKey() const V8_OVERRIDE {
- return BinaryOpICWithAllocationSite;
+ private:
+ BinaryOpICState state() const {
+ return BinaryOpICState(isolate(), static_cast<ExtraICState>(minor_key_));
}
- virtual int MinorKey() const V8_OVERRIDE { return GetExtraICState(); }
- private:
static void GenerateAheadOfTime(Isolate* isolate,
- const BinaryOpIC::State& state);
+ const BinaryOpICState& state);
- BinaryOpIC::State state_;
-
- DISALLOW_COPY_AND_ASSIGN(BinaryOpICWithAllocationSiteStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
+ DEFINE_PLATFORM_CODE_STUB(BinaryOpICWithAllocationSite, PlatformCodeStub);
};
-class BinaryOpWithAllocationSiteStub V8_FINAL : public BinaryOpICStub {
+class BinaryOpWithAllocationSiteStub FINAL : public BinaryOpICStub {
public:
BinaryOpWithAllocationSiteStub(Isolate* isolate,
Token::Value op,
OverwriteMode mode)
: BinaryOpICStub(isolate, op, mode) {}
- BinaryOpWithAllocationSiteStub(Isolate* isolate,
- const BinaryOpIC::State& state)
+ BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
: BinaryOpICStub(isolate, state) {}
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate);
-
- virtual Code::Kind GetCodeKind() const V8_FINAL V8_OVERRIDE {
+ virtual Code::Kind GetCodeKind() const FINAL OVERRIDE {
return Code::STUB;
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual Major MajorKey() const V8_OVERRIDE {
- return BinaryOpWithAllocationSite;
- }
-
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kAllocationSite = 0;
static const int kLeft = 1;
static const int kRight = 2;
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
+ DEFINE_HYDROGEN_CODE_STUB(BinaryOpWithAllocationSite, BinaryOpICStub);
};
@@ -1252,30 +1219,23 @@ enum StringAddFlags {
};
-class StringAddStub V8_FINAL : public HydrogenCodeStub {
+class StringAddStub FINAL : public HydrogenCodeStub {
public:
- StringAddStub(Isolate* isolate,
- StringAddFlags flags,
+ StringAddStub(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag)
- : HydrogenCodeStub(isolate),
- bit_field_(StringAddFlagsBits::encode(flags) |
- PretenureFlagBits::encode(pretenure_flag)) {}
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(StringAddFlagsBits::encode(flags) |
+ PretenureFlagBits::encode(pretenure_flag));
+ }
StringAddFlags flags() const {
- return StringAddFlagsBits::decode(bit_field_);
+ return StringAddFlagsBits::decode(sub_minor_key());
}
PretenureFlag pretenure_flag() const {
- return PretenureFlagBits::decode(bit_field_);
+ return PretenureFlagBits::decode(sub_minor_key());
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate);
-
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kLeft = 0;
static const int kRight = 1;
@@ -1283,51 +1243,41 @@ class StringAddStub V8_FINAL : public HydrogenCodeStub {
private:
class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
- uint32_t bit_field_;
- virtual Major MajorKey() const V8_OVERRIDE { return StringAdd; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return bit_field_; }
+ virtual void PrintBaseName(OStream& os) const OVERRIDE; // NOLINT
- virtual void PrintBaseName(OStream& os) const V8_OVERRIDE; // NOLINT
-
- DISALLOW_COPY_AND_ASSIGN(StringAddStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
+ DEFINE_HYDROGEN_CODE_STUB(StringAdd, HydrogenCodeStub);
};
-class ICCompareStub: public PlatformCodeStub {
+class CompareICStub : public PlatformCodeStub {
public:
- ICCompareStub(Isolate* isolate,
- Token::Value op,
- CompareIC::State left,
- CompareIC::State right,
- CompareIC::State handler)
- : PlatformCodeStub(isolate),
- op_(op),
- left_(left),
- right_(right),
- state_(handler) {
+ CompareICStub(Isolate* isolate, Token::Value op, CompareICState::State left,
+ CompareICState::State right, CompareICState::State state)
+ : PlatformCodeStub(isolate) {
DCHECK(Token::IsCompareOp(op));
+ minor_key_ = OpBits::encode(op - Token::EQ) | LeftStateBits::encode(left) |
+ RightStateBits::encode(right) | StateBits::encode(state);
}
- virtual void Generate(MacroAssembler* masm);
-
void set_known_map(Handle<Map> map) { known_map_ = map; }
- static void DecodeKey(uint32_t stub_key, CompareIC::State* left_state,
- CompareIC::State* right_state,
- CompareIC::State* handler_state, Token::Value* op);
-
virtual InlineCacheState GetICState() const;
- private:
- class OpField: public BitField<int, 0, 3> { };
- class LeftStateField: public BitField<int, 3, 4> { };
- class RightStateField: public BitField<int, 7, 4> { };
- class HandlerStateField: public BitField<int, 11, 4> { };
+ Token::Value op() const {
+ return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
+ }
- virtual CodeStub::Major MajorKey() const { return CompareIC; }
- virtual int MinorKey() const;
+ CompareICState::State left() const {
+ return LeftStateBits::decode(minor_key_);
+ }
+ CompareICState::State right() const {
+ return RightStateBits::decode(minor_key_);
+ }
+ CompareICState::State state() const { return StateBits::decode(minor_key_); }
+ private:
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_IC; }
void GenerateSmis(MacroAssembler* masm);
@@ -1340,18 +1290,24 @@ class ICCompareStub: public PlatformCodeStub {
void GenerateKnownObjects(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+ bool strict() const { return op() == Token::EQ_STRICT; }
+ Condition GetCondition() const;
virtual void AddToSpecialCache(Handle<Code> new_object);
virtual bool FindCodeInSpecialCache(Code** code_out);
- virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; }
+ virtual bool UseSpecialCache() {
+ return state() == CompareICState::KNOWN_OBJECT;
+ }
+
+ class OpBits : public BitField<int, 0, 3> {};
+ class LeftStateBits : public BitField<CompareICState::State, 3, 4> {};
+ class RightStateBits : public BitField<CompareICState::State, 7, 4> {};
+ class StateBits : public BitField<CompareICState::State, 11, 4> {};
- Token::Value op_;
- CompareIC::State left_;
- CompareIC::State right_;
- CompareIC::State state_;
Handle<Map> known_map_;
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_PLATFORM_CODE_STUB(CompareIC, PlatformCodeStub);
};
@@ -1360,35 +1316,26 @@ class CompareNilICStub : public HydrogenCodeStub {
Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
Type* GetInputType(Zone* zone, Handle<Map> map);
- CompareNilICStub(Isolate* isolate, NilValue nil)
- : HydrogenCodeStub(isolate), nil_value_(nil) { }
+ CompareNilICStub(Isolate* isolate, NilValue nil) : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(NilValueBits::encode(nil));
+ }
- CompareNilICStub(Isolate* isolate,
- ExtraICState ic_state,
+ CompareNilICStub(Isolate* isolate, ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
- : HydrogenCodeStub(isolate, init_state),
- nil_value_(NilValueField::decode(ic_state)),
- state_(State(TypesField::decode(ic_state))) {
- }
+ : HydrogenCodeStub(isolate, init_state) {
+ set_sub_minor_key(ic_state);
+ }
static Handle<Code> GetUninitialized(Isolate* isolate,
NilValue nil) {
return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
}
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- static void InstallDescriptors(Isolate* isolate) {
- CompareNilICStub compare_stub(isolate, kNullValue, UNINITIALIZED);
- compare_stub.InitializeInterfaceDescriptor(
- isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
- }
-
virtual InlineCacheState GetICState() const {
- if (state_.Contains(GENERIC)) {
+ State state = this->state();
+ if (state.Contains(GENERIC)) {
return MEGAMORPHIC;
- } else if (state_.Contains(MONOMORPHIC_MAP)) {
+ } else if (state.Contains(MONOMORPHIC_MAP)) {
return MONOMORPHIC;
} else {
return PREMONOMORPHIC;
@@ -1397,24 +1344,27 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual ExtraICState GetExtraICState() const {
- return NilValueField::encode(nil_value_) |
- TypesField::encode(state_.ToIntegral());
- }
+ virtual ExtraICState GetExtraICState() const { return sub_minor_key(); }
void UpdateStatus(Handle<Object> object);
- bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
- NilValue GetNilValue() const { return nil_value_; }
- void ClearState() { state_.RemoveAll(); }
+ bool IsMonomorphic() const { return state().Contains(MONOMORPHIC_MAP); }
- virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT
- virtual void PrintBaseName(OStream& os) const V8_OVERRIDE; // NOLINT
+ NilValue nil_value() const { return NilValueBits::decode(sub_minor_key()); }
+
+ void ClearState() {
+ set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
+ }
+
+ virtual void PrintState(OStream& os) const OVERRIDE; // NOLINT
+ virtual void PrintBaseName(OStream& os) const OVERRIDE; // NOLINT
private:
- friend class CompareNilIC;
+ CompareNilICStub(Isolate* isolate, NilValue nil,
+ InitializationState init_state)
+ : HydrogenCodeStub(isolate, init_state) {
+ set_sub_minor_key(NilValueBits::encode(nil));
+ }
enum CompareNilType {
UNDEFINED,
@@ -1436,21 +1386,15 @@ class CompareNilICStub : public HydrogenCodeStub {
};
friend OStream& operator<<(OStream& os, const State& s);
- CompareNilICStub(Isolate* isolate,
- NilValue nil,
- InitializationState init_state)
- : HydrogenCodeStub(isolate, init_state), nil_value_(nil) { }
-
- class NilValueField : public BitField<NilValue, 0, 1> {};
- class TypesField : public BitField<byte, 1, NUMBER_OF_TYPES> {};
+ State state() const { return State(TypesBits::decode(sub_minor_key())); }
- virtual CodeStub::Major MajorKey() const { return CompareNilIC; }
- virtual int NotMissMinorKey() const { return GetExtraICState(); }
+ class NilValueBits : public BitField<NilValue, 0, 1> {};
+ class TypesBits : public BitField<byte, 1, NUMBER_OF_TYPES> {};
- NilValue nil_value_;
- State state_;
+ friend class CompareNilIC;
- DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CompareNil);
+ DEFINE_HYDROGEN_CODE_STUB(CompareNilIC, HydrogenCodeStub);
};
@@ -1459,14 +1403,15 @@ OStream& operator<<(OStream& os, const CompareNilICStub::State& s);
class CEntryStub : public PlatformCodeStub {
public:
- CEntryStub(Isolate* isolate,
- int result_size,
+ CEntryStub(Isolate* isolate, int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
- : PlatformCodeStub(isolate),
- result_size_(result_size),
- save_doubles_(save_doubles) { }
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
+ DCHECK(result_size == 1 || result_size == 2);
+#ifdef _WIN64
+ minor_key_ = ResultSizeBits::update(minor_key_, result_size);
+#endif // _WIN64
+ }
// The version of this stub that doesn't save doubles is generated ahead of
// time, so it's OK to call it from other stubs that can't cope with GC during
@@ -1475,48 +1420,47 @@ class CEntryStub : public PlatformCodeStub {
static void GenerateAheadOfTime(Isolate* isolate);
private:
- // Number of pointers/values returned.
- const int result_size_;
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return CEntry; }
- int MinorKey() const;
+ bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
+#ifdef _WIN64
+ int result_size() const { return ResultSizeBits::decode(minor_key_); }
+#endif // _WIN64
bool NeedsImmovableCode();
+
+ class SaveDoublesBits : public BitField<bool, 0, 1> {};
+ class ResultSizeBits : public BitField<int, 1, 3> {};
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(CEntry, PlatformCodeStub);
};
class JSEntryStub : public PlatformCodeStub {
public:
- explicit JSEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
-
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_construct);
+ JSEntryStub(Isolate* isolate, StackFrame::Type type)
+ : PlatformCodeStub(isolate) {
+ DCHECK(type == StackFrame::ENTRY || type == StackFrame::ENTRY_CONSTRUCT);
+ minor_key_ = StackFrameTypeBits::encode(type);
+ }
private:
- Major MajorKey() const { return JSEntry; }
- int MinorKey() const { return 0; }
-
virtual void FinishCode(Handle<Code> code);
- int handler_offset_;
-};
-
+ virtual void PrintName(OStream& os) const OVERRIDE { // NOLINT
+ os << (type() == StackFrame::ENTRY ? "JSEntryStub"
+ : "JSConstructEntryStub");
+ }
-class JSConstructEntryStub : public JSEntryStub {
- public:
- explicit JSConstructEntryStub(Isolate* isolate) : JSEntryStub(isolate) { }
+ StackFrame::Type type() const {
+ return StackFrameTypeBits::decode(minor_key_);
+ }
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+ class StackFrameTypeBits : public BitField<StackFrame::Type, 0, 5> {};
- private:
- int MinorKey() const { return 1; }
+ int handler_offset_;
- virtual void PrintName(OStream& os) const V8_OVERRIDE { // NOLINT
- os << "JSConstructEntryStub";
- }
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
};
@@ -1529,22 +1473,30 @@ class ArgumentsAccessStub: public PlatformCodeStub {
NEW_STRICT
};
- ArgumentsAccessStub(Isolate* isolate, Type type)
- : PlatformCodeStub(isolate), type_(type) { }
+ ArgumentsAccessStub(Isolate* isolate, Type type) : PlatformCodeStub(isolate) {
+ minor_key_ = TypeBits::encode(type);
+ }
+
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+ if (type() == READ_ELEMENT) {
+ return ArgumentsAccessReadDescriptor(isolate());
+ }
+ return ContextOnlyDescriptor(isolate());
+ }
private:
- Type type_;
-
- Major MajorKey() const { return ArgumentsAccess; }
- int MinorKey() const { return type_; }
+ Type type() const { return TypeBits::decode(minor_key_); }
- void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
void GenerateNewSloppyFast(MacroAssembler* masm);
void GenerateNewSloppySlow(MacroAssembler* masm);
- virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual void PrintName(OStream& os) const OVERRIDE; // NOLINT
+
+ class TypeBits : public BitField<Type, 0, 2> {};
+
+ DEFINE_PLATFORM_CODE_STUB(ArgumentsAccess, PlatformCodeStub);
};
@@ -1552,108 +1504,84 @@ class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
- private:
- Major MajorKey() const { return RegExpExec; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+ DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
};
-class RegExpConstructResultStub V8_FINAL : public HydrogenCodeStub {
+class RegExpConstructResultStub FINAL : public HydrogenCodeStub {
public:
explicit RegExpConstructResultStub(Isolate* isolate)
: HydrogenCodeStub(isolate) { }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- virtual Major MajorKey() const V8_OVERRIDE { return RegExpConstructResult; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
-
- static void InstallDescriptors(Isolate* isolate);
-
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kLength = 0;
static const int kIndex = 1;
static const int kInput = 2;
- private:
- DISALLOW_COPY_AND_ASSIGN(RegExpConstructResultStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpConstructResult);
+ DEFINE_HYDROGEN_CODE_STUB(RegExpConstructResult, HydrogenCodeStub);
};
class CallFunctionStub: public PlatformCodeStub {
public:
CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags)
- : PlatformCodeStub(isolate), argc_(argc), flags_(flags) {
- DCHECK(argc <= Code::kMaxArguments);
+ : PlatformCodeStub(isolate) {
+ DCHECK(argc >= 0 && argc <= Code::kMaxArguments);
+ minor_key_ = ArgcBits::encode(argc) | FlagBits::encode(flags);
}
- void Generate(MacroAssembler* masm);
-
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor);
-
private:
- int argc_;
- CallFunctionFlags flags_;
+ int argc() const { return ArgcBits::decode(minor_key_); }
+ int flags() const { return FlagBits::decode(minor_key_); }
+
+ bool CallAsMethod() const {
+ return flags() == CALL_AS_METHOD || flags() == WRAP_AND_CALL;
+ }
- virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT
+ bool NeedsChecks() const { return flags() != WRAP_AND_CALL; }
+
+ virtual void PrintName(OStream& os) const OVERRIDE; // NOLINT
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
+ class FlagBits : public BitField<CallFunctionFlags, 0, 2> {};
class ArgcBits : public BitField<unsigned, 2, Code::kArgumentsBits> {};
-
STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
- Major MajorKey() const { return CallFunction; }
- int MinorKey() const {
- // Encode the parameters in a unique 32 bit value.
- return FlagBits::encode(flags_) | ArgcBits::encode(argc_);
- }
-
- bool CallAsMethod() {
- return flags_ == CALL_AS_METHOD || flags_ == WRAP_AND_CALL;
- }
-
- bool NeedsChecks() {
- return flags_ != WRAP_AND_CALL;
- }
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction);
+ DEFINE_PLATFORM_CODE_STUB(CallFunction, PlatformCodeStub);
};
class CallConstructStub: public PlatformCodeStub {
public:
CallConstructStub(Isolate* isolate, CallConstructorFlags flags)
- : PlatformCodeStub(isolate), flags_(flags) {}
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = FlagBits::encode(flags);
+ }
virtual void FinishCode(Handle<Code> code) {
code->set_has_function_cache(RecordCallTarget());
}
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor);
-
private:
- CallConstructorFlags flags_;
-
- virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT
-
- Major MajorKey() const { return CallConstruct; }
- int MinorKey() const { return flags_; }
+ CallConstructorFlags flags() const { return FlagBits::decode(minor_key_); }
bool RecordCallTarget() const {
- return (flags_ & RECORD_CONSTRUCTOR_TARGET) != 0;
+ return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
}
+
+ virtual void PrintName(OStream& os) const OVERRIDE; // NOLINT
+
+ class FlagBits : public BitField<CallConstructorFlags, 0, 1> {};
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
+ DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
};
@@ -1836,98 +1764,150 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
explicit LoadDictionaryElementStub(Isolate* isolate)
: HydrogenCodeStub(isolate) {}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
+};
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
- private:
- Major MajorKey() const { return LoadElement; }
- int NotMissMinorKey() const { return DICTIONARY_ELEMENTS; }
+class KeyedLoadGenericStub : public HydrogenCodeStub {
+ public:
+ explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
- DISALLOW_COPY_AND_ASSIGN(LoadDictionaryElementStub);
+ virtual Code::Kind GetCodeKind() const { return Code::KEYED_LOAD_IC; }
+ virtual InlineCacheState GetICState() const { return GENERIC; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
};
-class LoadDictionaryElementPlatformStub : public PlatformCodeStub {
+class LoadICTrampolineStub : public PlatformCodeStub {
public:
- explicit LoadDictionaryElementPlatformStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
+ LoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
+
+ virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
- void Generate(MacroAssembler* masm);
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+ return GENERIC;
+ }
+
+ virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+ return static_cast<ExtraICState>(minor_key_);
+ }
private:
- Major MajorKey() const { return LoadElement; }
- int MinorKey() const { return DICTIONARY_ELEMENTS; }
+ LoadICState state() const {
+ return LoadICState(static_cast<ExtraICState>(minor_key_));
+ }
- DISALLOW_COPY_AND_ASSIGN(LoadDictionaryElementPlatformStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadICTrampoline);
+ DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
};
-class KeyedLoadGenericStub : public HydrogenCodeStub {
+class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
public:
- explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+ explicit KeyedLoadICTrampolineStub(Isolate* isolate)
+ : LoadICTrampolineStub(isolate, LoadICState(0)) {}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ virtual Code::Kind GetCodeKind() const OVERRIDE {
+ return Code::KEYED_LOAD_IC;
+ }
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
+};
- static void InstallDescriptors(Isolate* isolate);
- virtual Code::Kind GetCodeKind() const { return Code::KEYED_LOAD_IC; }
- virtual InlineCacheState GetICState() const { return GENERIC; }
+class MegamorphicLoadStub : public HydrogenCodeStub {
+ public:
+ MegamorphicLoadStub(Isolate* isolate, const LoadICState& state)
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(state.GetExtraICState());
+ }
- private:
- Major MajorKey() const { return KeyedLoadGeneric; }
- int NotMissMinorKey() const { return 0; }
+ virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+ return MEGAMORPHIC;
+ }
+
+ virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+ return static_cast<ExtraICState>(sub_minor_key());
+ }
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadGenericStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_HYDROGEN_CODE_STUB(MegamorphicLoad, HydrogenCodeStub);
};
-class DoubleToIStub : public PlatformCodeStub {
+class VectorLoadStub : public HydrogenCodeStub {
public:
- DoubleToIStub(Isolate* isolate,
- Register source,
- Register destination,
- int offset,
- bool is_truncating,
- bool skip_fastpath = false)
- : PlatformCodeStub(isolate), bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code()) |
- DestinationRegisterBits::encode(destination.code()) |
- OffsetBits::encode(offset) |
- IsTruncatingBits::encode(is_truncating) |
- SkipFastPathBits::encode(skip_fastpath) |
- SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
+ explicit VectorLoadStub(Isolate* isolate, const LoadICState& state)
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(state.GetExtraICState());
}
- Register source() {
- return Register::from_code(SourceRegisterBits::decode(bit_field_));
- }
+ virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
- Register destination() {
- return Register::from_code(DestinationRegisterBits::decode(bit_field_));
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+ return GENERIC;
}
- bool is_truncating() {
- return IsTruncatingBits::decode(bit_field_);
+ virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+ return static_cast<ExtraICState>(sub_minor_key());
}
- bool skip_fastpath() {
- return SkipFastPathBits::decode(bit_field_);
- }
+ private:
+ LoadICState state() const { return LoadICState(GetExtraICState()); }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
+ DEFINE_HYDROGEN_CODE_STUB(VectorLoad, HydrogenCodeStub);
+};
+
+
+class VectorKeyedLoadStub : public VectorLoadStub {
+ public:
+ explicit VectorKeyedLoadStub(Isolate* isolate)
+ : VectorLoadStub(isolate, LoadICState(0)) {}
- int offset() {
- return OffsetBits::decode(bit_field_);
+ virtual Code::Kind GetCodeKind() const OVERRIDE {
+ return Code::KEYED_LOAD_IC;
}
- void Generate(MacroAssembler* masm);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
+ DEFINE_HYDROGEN_CODE_STUB(VectorKeyedLoad, VectorLoadStub);
+};
+
+
+class DoubleToIStub : public PlatformCodeStub {
+ public:
+ DoubleToIStub(Isolate* isolate, Register source, Register destination,
+ int offset, bool is_truncating, bool skip_fastpath = false)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = SourceRegisterBits::encode(source.code()) |
+ DestinationRegisterBits::encode(destination.code()) |
+ OffsetBits::encode(offset) |
+ IsTruncatingBits::encode(is_truncating) |
+ SkipFastPathBits::encode(skip_fastpath) |
+ SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
+ }
virtual bool SometimesSetsUpAFrame() { return false; }
private:
+ Register source() const {
+ return Register::from_code(SourceRegisterBits::decode(minor_key_));
+ }
+ Register destination() const {
+ return Register::from_code(DestinationRegisterBits::decode(minor_key_));
+ }
+ bool is_truncating() const { return IsTruncatingBits::decode(minor_key_); }
+ bool skip_fastpath() const { return SkipFastPathBits::decode(minor_key_); }
+ int offset() const { return OffsetBits::decode(minor_key_); }
+
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
class SourceRegisterBits:
@@ -1944,12 +1924,8 @@ class DoubleToIStub : public PlatformCodeStub {
class SSE3Bits:
public BitField<int, 2 * kBitsPerRegisterNumber + 5, 1> {}; // NOLINT
- Major MajorKey() const { return DoubleToI; }
- int MinorKey() const { return bit_field_; }
-
- int bit_field_;
-
- DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
};
@@ -1958,32 +1934,22 @@ class LoadFastElementStub : public HydrogenCodeStub {
LoadFastElementStub(Isolate* isolate, bool is_js_array,
ElementsKind elements_kind)
: HydrogenCodeStub(isolate) {
- bit_field_ = ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array);
+ set_sub_minor_key(ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array));
}
- bool is_js_array() const {
- return IsJSArrayBits::decode(bit_field_);
- }
+ bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
ElementsKind elements_kind() const {
- return ElementsKindBits::decode(bit_field_);
+ return ElementsKindBits::decode(sub_minor_key());
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
private:
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class IsJSArrayBits: public BitField<bool, 8, 1> {};
- uint32_t bit_field_;
- Major MajorKey() const { return LoadElement; }
- int NotMissMinorKey() const { return bit_field_; }
-
- DISALLOW_COPY_AND_ASSIGN(LoadFastElementStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_HYDROGEN_CODE_STUB(LoadFastElement, HydrogenCodeStub);
};
@@ -1992,38 +1958,28 @@ class StoreFastElementStub : public HydrogenCodeStub {
StoreFastElementStub(Isolate* isolate, bool is_js_array,
ElementsKind elements_kind, KeyedAccessStoreMode mode)
: HydrogenCodeStub(isolate) {
- bit_field_ = ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array) |
- StoreModeBits::encode(mode);
+ set_sub_minor_key(ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array) |
+ StoreModeBits::encode(mode));
}
- bool is_js_array() const {
- return IsJSArrayBits::decode(bit_field_);
- }
+ bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
ElementsKind elements_kind() const {
- return ElementsKindBits::decode(bit_field_);
+ return ElementsKindBits::decode(sub_minor_key());
}
KeyedAccessStoreMode store_mode() const {
- return StoreModeBits::decode(bit_field_);
+ return StoreModeBits::decode(sub_minor_key());
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
private:
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
class IsJSArrayBits: public BitField<bool, 12, 1> {};
- uint32_t bit_field_;
- Major MajorKey() const { return StoreElement; }
- int NotMissMinorKey() const { return bit_field_; }
-
- DISALLOW_COPY_AND_ASSIGN(StoreFastElementStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+ DEFINE_HYDROGEN_CODE_STUB(StoreFastElement, HydrogenCodeStub);
};
@@ -2033,38 +1989,26 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
ElementsKind from_kind,
ElementsKind to_kind,
bool is_js_array) : HydrogenCodeStub(isolate) {
- bit_field_ = FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind) |
- IsJSArrayBits::encode(is_js_array);
+ set_sub_minor_key(FromKindBits::encode(from_kind) |
+ ToKindBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_js_array));
}
ElementsKind from_kind() const {
- return FromKindBits::decode(bit_field_);
- }
-
- ElementsKind to_kind() const {
- return ToKindBits::decode(bit_field_);
- }
-
- bool is_js_array() const {
- return IsJSArrayBits::decode(bit_field_);
+ return FromKindBits::decode(sub_minor_key());
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
private:
class FromKindBits: public BitField<ElementsKind, 8, 8> {};
class ToKindBits: public BitField<ElementsKind, 0, 8> {};
class IsJSArrayBits: public BitField<bool, 16, 1> {};
- uint32_t bit_field_;
-
- Major MajorKey() const { return TransitionElementsKind; }
- int NotMissMinorKey() const { return bit_field_; }
- DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
+ DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
};
@@ -2079,20 +2023,19 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
// for an ElementsKind and the desired usage of the stub.
DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
- bit_field_ = ElementsKindBits::encode(kind) |
- AllocationSiteOverrideModeBits::encode(override_mode);
+ set_sub_minor_key(ElementsKindBits::encode(kind) |
+ AllocationSiteOverrideModeBits::encode(override_mode));
}
ElementsKind elements_kind() const {
- return ElementsKindBits::decode(bit_field_);
+ return ElementsKindBits::decode(sub_minor_key());
}
AllocationSiteOverrideMode override_mode() const {
- return AllocationSiteOverrideModeBits::decode(bit_field_);
+ return AllocationSiteOverrideModeBits::decode(sub_minor_key());
}
static void GenerateStubsAheadOfTime(Isolate* isolate);
- static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kConstructor = 0;
@@ -2102,17 +2045,14 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
OStream& BasePrintName(OStream& os, const char* name) const; // NOLINT
private:
- int NotMissMinorKey() const { return bit_field_; }
-
// Ensure data fits within available bits.
STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class AllocationSiteOverrideModeBits: public
BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
- uint32_t bit_field_;
- DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
+ DEFINE_CODE_STUB_BASE(ArrayConstructorStubBase, HydrogenCodeStub);
};
@@ -2125,19 +2065,14 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(isolate, kind, override_mode) {
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
private:
- Major MajorKey() const { return ArrayNoArgumentConstructor; }
-
- virtual void PrintName(OStream& os) const V8_OVERRIDE { // NOLINT
+ virtual void PrintName(OStream& os) const OVERRIDE { // NOLINT
BasePrintName(os, "ArrayNoArgumentConstructorStub");
}
- DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructorConstantArgCount);
+ DEFINE_HYDROGEN_CODE_STUB(ArrayNoArgumentConstructor,
+ ArrayConstructorStubBase);
};
@@ -2150,19 +2085,14 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(isolate, kind, override_mode) {
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
private:
- Major MajorKey() const { return ArraySingleArgumentConstructor; }
-
virtual void PrintName(OStream& os) const { // NOLINT
BasePrintName(os, "ArraySingleArgumentConstructorStub");
}
- DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+ DEFINE_HYDROGEN_CODE_STUB(ArraySingleArgumentConstructor,
+ ArrayConstructorStubBase);
};
@@ -2175,19 +2105,14 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(isolate, kind, override_mode) {
}
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
private:
- Major MajorKey() const { return ArrayNArgumentsConstructor; }
-
virtual void PrintName(OStream& os) const { // NOLINT
BasePrintName(os, "ArrayNArgumentsConstructorStub");
}
- DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+ DEFINE_HYDROGEN_CODE_STUB(ArrayNArgumentsConstructor,
+ ArrayConstructorStubBase);
};
@@ -2195,23 +2120,22 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
public:
InternalArrayConstructorStubBase(Isolate* isolate, ElementsKind kind)
: HydrogenCodeStub(isolate) {
- kind_ = kind;
+ set_sub_minor_key(ElementsKindBits::encode(kind));
}
static void GenerateStubsAheadOfTime(Isolate* isolate);
- static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kConstructor = 0;
- ElementsKind elements_kind() const { return kind_; }
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(sub_minor_key());
+ }
private:
- int NotMissMinorKey() const { return kind_; }
+ class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
- ElementsKind kind_;
-
- DISALLOW_COPY_AND_ASSIGN(InternalArrayConstructorStubBase);
+ DEFINE_CODE_STUB_BASE(InternalArrayConstructorStubBase, HydrogenCodeStub);
};
@@ -2222,15 +2146,9 @@ class InternalArrayNoArgumentConstructorStub : public
ElementsKind kind)
: InternalArrayConstructorStubBase(isolate, kind) { }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
- Major MajorKey() const { return InternalArrayNoArgumentConstructor; }
-
- DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructorConstantArgCount);
+ DEFINE_HYDROGEN_CODE_STUB(InternalArrayNoArgumentConstructor,
+ InternalArrayConstructorStubBase);
};
@@ -2241,15 +2159,9 @@ class InternalArraySingleArgumentConstructorStub : public
ElementsKind kind)
: InternalArrayConstructorStubBase(isolate, kind) { }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
- Major MajorKey() const { return InternalArraySingleArgumentConstructor; }
-
- DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+ DEFINE_HYDROGEN_CODE_STUB(InternalArraySingleArgumentConstructor,
+ InternalArrayConstructorStubBase);
};
@@ -2259,46 +2171,28 @@ class InternalArrayNArgumentsConstructorStub : public
InternalArrayNArgumentsConstructorStub(Isolate* isolate, ElementsKind kind)
: InternalArrayConstructorStubBase(isolate, kind) { }
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
- Major MajorKey() const { return InternalArrayNArgumentsConstructor; }
-
- DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+ DEFINE_HYDROGEN_CODE_STUB(InternalArrayNArgumentsConstructor,
+ InternalArrayConstructorStubBase);
};
class StoreElementStub : public PlatformCodeStub {
public:
- StoreElementStub(Isolate* isolate, bool is_js_array,
- ElementsKind elements_kind, KeyedAccessStoreMode store_mode)
- : PlatformCodeStub(isolate),
- is_js_array_(is_js_array),
- elements_kind_(elements_kind),
- store_mode_(store_mode) {}
-
- Major MajorKey() const { return StoreElement; }
- int MinorKey() const {
- return ElementsKindBits::encode(elements_kind_) |
- IsJSArrayBits::encode(is_js_array_) |
- StoreModeBits::encode(store_mode_);
+ StoreElementStub(Isolate* isolate, ElementsKind elements_kind)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = ElementsKindBits::encode(elements_kind);
}
- void Generate(MacroAssembler* masm);
-
private:
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
- class IsJSArrayBits: public BitField<bool, 12, 1> {};
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(minor_key_);
+ }
- bool is_js_array_;
- ElementsKind elements_kind_;
- KeyedAccessStoreMode store_mode_;
+ class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
- DISALLOW_COPY_AND_ASSIGN(StoreElementStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+ DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
};
@@ -2341,39 +2235,34 @@ class ToBooleanStub: public HydrogenCodeStub {
};
ToBooleanStub(Isolate* isolate, ResultMode mode, Types types = Types())
- : HydrogenCodeStub(isolate), types_(types), mode_(mode) {}
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(TypesBits::encode(types.ToByte()) |
+ ResultModeBits::encode(mode));
+ }
+
ToBooleanStub(Isolate* isolate, ExtraICState state)
- : HydrogenCodeStub(isolate),
- types_(static_cast<byte>(state)),
- mode_(RESULT_AS_SMI) {}
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(TypesBits::encode(static_cast<byte>(state)) |
+ ResultModeBits::encode(RESULT_AS_SMI));
+ }
bool UpdateStatus(Handle<Object> object);
- Types GetTypes() { return types_; }
- ResultMode GetMode() { return mode_; }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
+ ResultMode mode() const { return ResultModeBits::decode(sub_minor_key()); }
virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
- virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual void PrintState(OStream& os) const OVERRIDE; // NOLINT
virtual bool SometimesSetsUpAFrame() { return false; }
- static void InstallDescriptors(Isolate* isolate) {
- ToBooleanStub stub(isolate, RESULT_AS_SMI);
- stub.InitializeInterfaceDescriptor(
- isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
- }
-
static Handle<Code> GetUninitialized(Isolate* isolate) {
return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
}
- virtual ExtraICState GetExtraICState() const { return types_.ToIntegral(); }
+ virtual ExtraICState GetExtraICState() const { return types().ToIntegral(); }
virtual InlineCacheState GetICState() const {
- if (types_.IsEmpty()) {
+ if (types().IsEmpty()) {
return ::v8::internal::UNINITIALIZED;
} else {
return MONOMORPHIC;
@@ -2381,19 +2270,16 @@ class ToBooleanStub: public HydrogenCodeStub {
}
private:
- class TypesBits : public BitField<byte, 0, NUMBER_OF_TYPES> {};
- class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {};
-
- Major MajorKey() const { return ToBoolean; }
- int NotMissMinorKey() const {
- return TypesBits::encode(types_.ToByte()) | ResultModeBits::encode(mode_);
+ ToBooleanStub(Isolate* isolate, InitializationState init_state)
+ : HydrogenCodeStub(isolate, init_state) {
+ set_sub_minor_key(ResultModeBits::encode(RESULT_AS_SMI));
}
- ToBooleanStub(Isolate* isolate, InitializationState init_state)
- : HydrogenCodeStub(isolate, init_state), mode_(RESULT_AS_SMI) {}
+ class TypesBits : public BitField<byte, 0, NUMBER_OF_TYPES> {};
+ class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {};
- Types types_;
- ResultMode mode_;
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
+ DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
};
@@ -2402,26 +2288,21 @@ OStream& operator<<(OStream& os, const ToBooleanStub::Types& t);
class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
- ElementsTransitionAndStoreStub(Isolate* isolate,
- ElementsKind from_kind,
- ElementsKind to_kind,
- bool is_jsarray,
+ ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
+ ElementsKind to_kind, bool is_jsarray,
KeyedAccessStoreMode store_mode)
- : HydrogenCodeStub(isolate),
- from_kind_(from_kind),
- to_kind_(to_kind),
- is_jsarray_(is_jsarray),
- store_mode_(store_mode) {}
-
- ElementsKind from_kind() const { return from_kind_; }
- ElementsKind to_kind() const { return to_kind_; }
- bool is_jsarray() const { return is_jsarray_; }
- KeyedAccessStoreMode store_mode() const { return store_mode_; }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+ : HydrogenCodeStub(isolate) {
+ set_sub_minor_key(FromBits::encode(from_kind) | ToBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_jsarray) |
+ StoreModeBits::encode(store_mode));
+ }
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+ ElementsKind from_kind() const { return FromBits::decode(sub_minor_key()); }
+ ElementsKind to_kind() const { return ToBits::decode(sub_minor_key()); }
+ bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
+ KeyedAccessStoreMode store_mode() const {
+ return StoreModeBits::decode(sub_minor_key());
+ }
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
enum ParameterIndices {
@@ -2433,34 +2314,26 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
};
static const Register ValueRegister() {
- return KeyedStoreIC::ValueRegister();
+ return ElementTransitionAndStoreDescriptor::ValueRegister();
+ }
+ static const Register MapRegister() {
+ return ElementTransitionAndStoreDescriptor::MapRegister();
+ }
+ static const Register KeyRegister() {
+ return ElementTransitionAndStoreDescriptor::NameRegister();
}
- static const Register MapRegister() { return KeyedStoreIC::MapRegister(); }
- static const Register KeyRegister() { return KeyedStoreIC::NameRegister(); }
static const Register ObjectRegister() {
- return KeyedStoreIC::ReceiverRegister();
+ return ElementTransitionAndStoreDescriptor::ReceiverRegister();
}
private:
- class FromBits: public BitField<ElementsKind, 0, 8> {};
- class ToBits: public BitField<ElementsKind, 8, 8> {};
- class IsJSArrayBits: public BitField<bool, 16, 1> {};
- class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {};
-
- Major MajorKey() const { return ElementsTransitionAndStore; }
- int NotMissMinorKey() const {
- return FromBits::encode(from_kind_) |
- ToBits::encode(to_kind_) |
- IsJSArrayBits::encode(is_jsarray_) |
- StoreModeBits::encode(store_mode_);
- }
-
- ElementsKind from_kind_;
- ElementsKind to_kind_;
- bool is_jsarray_;
- KeyedAccessStoreMode store_mode_;
+ class FromBits : public BitField<ElementsKind, 0, 8> {};
+ class ToBits : public BitField<ElementsKind, 8, 8> {};
+ class IsJSArrayBits : public BitField<bool, 16, 1> {};
+ class StoreModeBits : public BitField<KeyedAccessStoreMode, 17, 4> {};
- DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ElementTransitionAndStore);
+ DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
};
@@ -2469,35 +2342,29 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub {
explicit StoreArrayLiteralElementStub(Isolate* isolate)
: PlatformCodeStub(isolate) { }
- private:
- Major MajorKey() const { return StoreArrayLiteralElement; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreArrayLiteralElement);
+ DEFINE_PLATFORM_CODE_STUB(StoreArrayLiteralElement, PlatformCodeStub);
};
class StubFailureTrampolineStub : public PlatformCodeStub {
public:
StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode)
- : PlatformCodeStub(isolate),
- function_mode_(function_mode) {}
+ : PlatformCodeStub(isolate) {
+ minor_key_ = FunctionModeField::encode(function_mode);
+ }
static void GenerateAheadOfTime(Isolate* isolate);
private:
- class FunctionModeField: public BitField<StubFunctionMode, 0, 1> {};
-
- Major MajorKey() const { return StubFailureTrampoline; }
- int MinorKey() const { return FunctionModeField::encode(function_mode_); }
-
- void Generate(MacroAssembler* masm);
+ StubFunctionMode function_mode() const {
+ return FunctionModeField::decode(minor_key_);
+ }
- StubFunctionMode function_mode_;
+ class FunctionModeField : public BitField<StubFunctionMode, 0, 1> {};
- DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StubFailureTrampoline, PlatformCodeStub);
};
@@ -2516,20 +2383,57 @@ class ProfileEntryHookStub : public PlatformCodeStub {
intptr_t stack_pointer,
Isolate* isolate);
- Major MajorKey() const { return ProfileEntryHook; }
- int MinorKey() const { return 0; }
+ // ProfileEntryHookStub is called at the start of a function, so it has the
+ // same register set.
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction)
+ DEFINE_PLATFORM_CODE_STUB(ProfileEntryHook, PlatformCodeStub);
+};
+
+
+class StoreBufferOverflowStub : public PlatformCodeStub {
+ public:
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = SaveDoublesBits::encode(save_fp == kSaveFPRegs);
+ }
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
+
+ class SaveDoublesBits : public BitField<bool, 0, 1> {};
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreBufferOverflow, PlatformCodeStub);
+};
- void Generate(MacroAssembler* masm);
- DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
+class SubStringStub : public PlatformCodeStub {
+ public:
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+ DEFINE_PLATFORM_CODE_STUB(SubString, PlatformCodeStub);
};
-class CallDescriptors {
+class StringCompareStub : public PlatformCodeStub {
public:
- static void InitializeForIsolate(Isolate* isolate);
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+ DEFINE_PLATFORM_CODE_STUB(StringCompare, PlatformCodeStub);
};
+
+#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
+#undef DEFINE_PLATFORM_CODE_STUB
+#undef DEFINE_HANDLER_CODE_STUB
+#undef DEFINE_HYDROGEN_CODE_STUB
+#undef DEFINE_CODE_STUB
+#undef DEFINE_CODE_STUB_BASE
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index a24220d9d0..0998685a9f 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -11,8 +11,7 @@
#include "src/debug.h"
#include "src/prettyprinter.h"
#include "src/rewriter.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -117,6 +116,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
CodeStub::MajorName(info->code_stub()->MajorKey(), true);
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
+ AllowDeferredHandleDereference allow_deference_for_trace;
PrintF("%s", info->function()->debug_name()->ToCString().get());
}
PrintF("]\n");
@@ -235,34 +235,4 @@ bool CodeGenerator::RecordPositions(MacroAssembler* masm,
return false;
}
-
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- switch (type_) {
- case READ_ELEMENT:
- GenerateReadElement(masm);
- break;
- case NEW_SLOPPY_FAST:
- GenerateNewSloppyFast(masm);
- break;
- case NEW_SLOPPY_SLOW:
- GenerateNewSloppySlow(masm);
- break;
- case NEW_STRICT:
- GenerateNewStrict(masm);
- break;
- }
-}
-
-
-int CEntryStub::MinorKey() const {
- int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
- DCHECK(result_size_ == 1 || result_size_ == 2);
-#ifdef _WIN64
- return result | ((result_size_ == 1) ? 0 : 2);
-#else
- return result;
-#endif
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index e01a3982a0..ba99a404a3 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -6,7 +6,7 @@
#define V8_CODEGEN_H_
#include "src/code-stubs.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
// Include the declaration of the architecture defined class CodeGenerator.
// The contract to the shared code is that the the CodeGenerator is a subclass
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 5e4421eb10..0027bd7320 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -23,7 +23,7 @@ function SetConstructor(iterable) {
var iter, adder;
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(iterable);
+ iter = GetIterator(ToObject(iterable));
adder = this.add;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['add', this]);
@@ -49,6 +49,13 @@ function SetAddJS(key) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.add', this]);
}
+ // Normalize -0 to +0 as required by the spec.
+ // Even though we use SameValueZero as the comparison for the keys we don't
+ // want to ever store -0 as the key since the key is directly exposed when
+ // doing iteration.
+ if (key === 0) {
+ key = 0;
+ }
return %SetAdd(this, key);
}
@@ -147,7 +154,7 @@ function MapConstructor(iterable) {
var iter, adder;
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(iterable);
+ iter = GetIterator(ToObject(iterable));
adder = this.set;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['set', this]);
@@ -186,6 +193,13 @@ function MapSetJS(key, value) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.set', this]);
}
+ // Normalize -0 to +0 as required by the spec.
+ // Even though we use SameValueZero as the comparison for the keys we don't
+ // want to ever store -0 as the key since the key is directly exposed when
+ // doing iteration.
+ if (key === 0) {
+ key = 0;
+ }
return %MapSet(this, key, value);
}
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 559f980ff1..4e02cdd322 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -344,7 +344,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags) {
+ JSRegExp::Flags flags) {
if (!IsEnabled()) return MaybeHandle<FixedArray>();
return reg_exp_.Lookup(source, flags);
diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h
deleted file mode 100644
index 669dd28b6a..0000000000
--- a/deps/v8/src/compiler-intrinsics.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_INTRINSICS_H_
-#define V8_COMPILER_INTRINSICS_H_
-
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilerIntrinsics {
- public:
- // Returns number of zero bits preceding least significant 1 bit.
- // Undefined for zero value.
- INLINE(static int CountTrailingZeros(uint32_t value));
-
- // Returns number of zero bits following most significant 1 bit.
- // Undefined for zero value.
- INLINE(static int CountLeadingZeros(uint32_t value));
-
- // Returns the number of bits set.
- INLINE(static int CountSetBits(uint32_t value));
-};
-
-#ifdef __GNUC__
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- return __builtin_ctz(value);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- return __builtin_clz(value);
-}
-
-int CompilerIntrinsics::CountSetBits(uint32_t value) {
- return __builtin_popcount(value);
-}
-
-#elif defined(_MSC_VER)
-
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- unsigned long result; //NOLINT
- _BitScanForward(&result, static_cast<long>(value)); //NOLINT
- return static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- unsigned long result; //NOLINT
- _BitScanReverse(&result, static_cast<long>(value)); //NOLINT
- return 31 - static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountSetBits(uint32_t value) {
- // Manually count set bits.
- value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
- value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
- value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
- value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
- value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
- return value;
-}
-
-#else
-#error Unsupported compiler
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILER_INTRINSICS_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e496aee62d..d8467e54ee 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -44,13 +44,12 @@ ScriptData::ScriptData(const byte* data, int length)
}
-CompilationInfo::CompilationInfo(Handle<Script> script,
- Zone* zone)
- : flags_(StrictModeField::encode(SLOPPY)),
+CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
+ : flags_(kThisHasUses),
script_(script),
+ source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false) {
@@ -59,11 +58,11 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone)
- : flags_(StrictModeField::encode(SLOPPY)),
+ : flags_(kThisHasUses),
script_(Handle<Script>::null()),
+ source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false) {
@@ -73,12 +72,12 @@ CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone)
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
+ : flags_(kLazy | kThisHasUses),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
+ source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false) {
@@ -86,16 +85,15 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
}
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
- Zone* zone)
- : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
+ : flags_(kLazy | kThisHasUses),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
+ source_stream_(NULL),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false) {
@@ -103,13 +101,12 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
}
-CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
- Isolate* isolate,
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate,
Zone* zone)
- : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
+ : flags_(kLazy | kThisHasUses),
+ source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false) {
@@ -118,6 +115,22 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
}
+CompilationInfo::CompilationInfo(
+ ScriptCompiler::ExternalSourceStream* stream,
+ ScriptCompiler::StreamedSource::Encoding encoding, Isolate* isolate,
+ Zone* zone)
+ : flags_(kThisHasUses),
+ source_stream_(stream),
+ source_stream_encoding_(encoding),
+ osr_ast_id_(BailoutId::None()),
+ parameter_count_(0),
+ optimization_id_(-1),
+ ast_value_factory_(NULL),
+ ast_value_factory_owned_(false) {
+ Initialize(isolate, BASE, zone);
+}
+
+
void CompilationInfo::Initialize(Isolate* isolate,
Mode mode,
Zone* zone) {
@@ -143,26 +156,33 @@ void CompilationInfo::Initialize(Isolate* isolate,
return;
}
mode_ = mode;
- abort_due_to_dependency_ = false;
- if (script_->type()->value() == Script::TYPE_NATIVE) MarkAsNative();
+ if (!script_.is_null() && script_->type()->value() == Script::TYPE_NATIVE) {
+ MarkAsNative();
+ }
if (isolate_->debug()->is_active()) MarkAsDebug();
+ if (FLAG_context_specialization) MarkAsContextSpecializing();
+ if (FLAG_turbo_inlining) MarkAsInliningEnabled();
+ if (FLAG_turbo_types) MarkAsTypingEnabled();
if (!shared_info_.is_null()) {
DCHECK(strict_mode() == SLOPPY);
SetStrictMode(shared_info_->strict_mode());
}
- set_bailout_reason(kUnknown);
+ bailout_reason_ = kUnknown;
if (!shared_info().is_null() && shared_info()->is_compiled()) {
// We should initialize the CompilationInfo feedback vector from the
// passed in shared info, rather than creating a new one.
- feedback_vector_ = Handle<FixedArray>(shared_info()->feedback_vector(),
- isolate);
+ feedback_vector_ =
+ Handle<TypeFeedbackVector>(shared_info()->feedback_vector(), isolate);
}
}
CompilationInfo::~CompilationInfo() {
+ if (GetFlag(kDisableFutureOptimization)) {
+ shared_info()->DisableOptimization(bailout_reason());
+ }
delete deferred_handles_;
delete no_frame_ranges_;
if (ast_value_factory_owned_) delete ast_value_factory_;
@@ -241,18 +261,6 @@ Code::Flags CompilationInfo::flags() const {
}
-// Disable optimization for the rest of the compilation pipeline.
-void CompilationInfo::DisableOptimization() {
- bool is_optimizable_closure =
- FLAG_optimize_closures &&
- closure_.is_null() &&
- !scope_->HasTrivialOuterContext() &&
- !scope_->outer_scope_calls_sloppy_eval() &&
- !scope_->inside_with();
- SetMode(is_optimizable_closure ? BASE : NONOPT);
-}
-
-
// Primitive functions are unlikely to be picked up by the stack-walking
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
@@ -285,7 +293,7 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
}
#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) V8_OVERRIDE { \
+ virtual void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
@@ -295,7 +303,7 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
#undef DEF_VISIT
#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) V8_OVERRIDE { \
+ virtual void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
@@ -305,7 +313,7 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
#undef DEF_VISIT
#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) V8_OVERRIDE { \
+ virtual void Visit##type(type* node) OVERRIDE { \
HOptimizedGraphBuilder::Visit##type(node); \
}
MODULE_NODE_LIST(DEF_VISIT)
@@ -315,26 +323,16 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
- DCHECK(isolate()->use_crankshaft());
DCHECK(info()->IsOptimizing());
DCHECK(!info()->IsCompilingForDebugging());
- // We should never arrive here if there is no code object on the
- // shared function object.
- DCHECK(info()->shared_info()->code()->kind() == Code::FUNCTION);
-
// We should never arrive here if optimization has been disabled on the
// shared function info.
DCHECK(!info()->shared_info()->optimization_disabled());
- // Fall back to using the full code generator if it's not possible
- // to use the Hydrogen-based optimizing compiler. We already have
- // generated code for this from the shared function object.
- if (FLAG_always_full_compiler) return AbortOptimization();
-
// Do not use crankshaft if we need to be able to set break points.
if (isolate()->DebuggerHasBreakPoints()) {
- return AbortOptimization(kDebuggerHasBreakPoints);
+ return RetryOptimization(kDebuggerHasBreakPoints);
}
// Limit the number of times we re-compile a functions with
@@ -342,7 +340,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
if (info()->opt_count() > kMaxOptCount) {
- return AbortAndDisableOptimization(kOptimizedTooManyTimes);
+ return AbortOptimization(kOptimizedTooManyTimes);
}
// Due to an encoding limit on LUnallocated operands in the Lithium
@@ -355,17 +353,17 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
- return AbortAndDisableOptimization(kTooManyParameters);
+ return AbortOptimization(kTooManyParameters);
}
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
- return AbortAndDisableOptimization(kTooManyParametersLocals);
+ return AbortOptimization(kTooManyParametersLocals);
}
if (scope->HasIllegalRedeclaration()) {
- return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration);
+ return AbortOptimization(kFunctionWithIllegalRedeclaration);
}
// Check the whitelist for Crankshaft.
@@ -384,21 +382,8 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (FLAG_hydrogen_stats) {
timer.Start();
}
- CompilationInfoWithZone unoptimized(info()->shared_info());
- // Note that we use the same AST that we will use for generating the
- // optimized code.
- unoptimized.SetFunction(info()->function());
- unoptimized.PrepareForCompilation(info()->scope());
- unoptimized.SetContext(info()->context());
- if (should_recompile) unoptimized.EnableDeoptimizationSupport();
- bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
- if (should_recompile) {
- if (!succeeded) return SetLastStatus(FAILED);
- Handle<SharedFunctionInfo> shared = info()->shared_info();
- shared->EnableDeoptimizationSupport(*unoptimized.code());
- // The existing unoptimized code was replaced with the new one.
- Compiler::RecordFunctionCompilation(
- Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+ if (!Compiler::EnsureDeoptimizationSupport(info())) {
+ return SetLastStatus(FAILED);
}
if (FLAG_hydrogen_stats) {
isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
@@ -408,15 +393,14 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
DCHECK(info()->shared_info()->has_deoptimization_support());
// Check the whitelist for TurboFan.
- if (info()->closure()->PassesFilter(FLAG_turbo_filter) &&
- // TODO(turbofan): Make try-catch work and remove this bailout.
- info()->function()->dont_optimize_reason() != kTryCatchStatement &&
- info()->function()->dont_optimize_reason() != kTryFinallyStatement &&
- // TODO(turbofan): Make OSR work and remove this bailout.
- !info()->is_osr()) {
+ if ((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
+ info()->closure()->PassesFilter(FLAG_turbo_filter)) {
compiler::Pipeline pipeline(info());
pipeline.GenerateCode();
if (!info()->code().is_null()) {
+ if (FLAG_turbo_deoptimization) {
+ info()->context()->native_context()->AddOptimizedCode(*info()->code());
+ }
return SetLastStatus(SUCCEEDED);
}
}
@@ -443,20 +427,11 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
return SetLastStatus(FAILED);
}
- // The function being compiled may have bailed out due to an inline
- // candidate bailing out. In such a case, we don't disable
- // optimization on the shared_info.
- DCHECK(!graph_builder_->inline_bailout() || graph_ == NULL);
- if (graph_ == NULL) {
- if (graph_builder_->inline_bailout()) {
- return AbortOptimization();
- } else {
- return AbortAndDisableOptimization();
- }
- }
+ if (graph_ == NULL) return SetLastStatus(BAILED_OUT);
if (info()->HasAbortedDueToDependencyChange()) {
- return AbortOptimization(kBailedOutDueToDependencyChange);
+ // Dependency has changed during graph creation. Let's try again later.
+ return RetryOptimization(kBailedOutDueToDependencyChange);
}
return SetLastStatus(SUCCEEDED);
@@ -486,7 +461,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
graph_builder_->Bailout(bailout_reason);
}
- return AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
}
@@ -513,23 +488,9 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
- info_->set_bailout_reason(kCodeGenerationFailed);
- } else if (info()->bailout_reason() == kMapBecameDeprecated) {
- if (FLAG_trace_opt) {
- PrintF("[aborted optimizing ");
- info()->closure()->ShortPrint();
- PrintF(" because a map became deprecated]\n");
- }
- return AbortOptimization();
- } else if (info()->bailout_reason() == kMapBecameUnstable) {
- if (FLAG_trace_opt) {
- PrintF("[aborted optimizing ");
- info()->closure()->ShortPrint();
- PrintF(" because a map became unstable]\n");
- }
- return AbortOptimization();
+ return AbortOptimization(kCodeGenerationFailed);
}
- return AbortAndDisableOptimization();
+ return SetLastStatus(BAILED_OUT);
}
info()->SetCode(optimized_code);
}
@@ -601,37 +562,6 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
}
-static void UpdateSharedFunctionInfo(CompilationInfo* info) {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the DCHECK below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->scope(), info->zone());
- shared->set_scope_info(*scope_info);
-
- Handle<Code> code = info->code();
- CHECK(code->kind() == Code::FUNCTION);
- shared->ReplaceCode(*code);
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- shared->set_feedback_vector(*info->feedback_vector());
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Check the function has compiled code.
- DCHECK(shared->is_compiled());
- shared->set_bailout_reason(lit->dont_optimize_reason());
- shared->set_ast_node_count(lit->ast_node_count());
- shared->set_strict_mode(lit->strict_mode());
-}
-
-
// Sets the function info on a function.
// The start_position points to the first '(' character after the function name
// in the full script source. When counting characters in the script source the
@@ -660,8 +590,42 @@ static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_function(lit->is_function());
function_info->set_bailout_reason(lit->dont_optimize_reason());
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
- function_info->set_is_generator(lit->is_generator());
- function_info->set_is_arrow(lit->is_arrow());
+ function_info->set_kind(lit->kind());
+ function_info->set_asm_function(lit->scope()->asm_function());
+}
+
+
+static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+ CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared) {
+ // SharedFunctionInfo is passed separately, because if CompilationInfo
+ // was created using Script object, it will not have it.
+
+ // Log the code generation. If source information is available include
+ // script name and line number. Check explicitly whether logging is
+ // enabled as finding the line number is not free.
+ if (info->isolate()->logger()->is_logging_code_events() ||
+ info->isolate()->cpu_profiler()->is_profiling()) {
+ Handle<Script> script = info->script();
+ Handle<Code> code = info->code();
+ if (code.is_identical_to(info->isolate()->builtins()->CompileLazy())) {
+ return;
+ }
+ int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ Script::GetColumnNumber(script, shared->start_position()) + 1;
+ String* script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : info->isolate()->heap()->empty_string();
+ Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+ PROFILE(info->isolate(),
+ CodeCreateEvent(log_tag, *code, *shared, info, script_name,
+ line_num, column_num));
+ }
+
+ GDBJIT(AddCode(Handle<String>(shared->DebugName()),
+ Handle<Script>(info->script()), Handle<Code>(info->code()),
+ info));
}
@@ -685,18 +649,159 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
CompilationInfo* info) {
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
+
+ // Parse and update CompilationInfo with the results.
if (!Parser::Parse(info)) return MaybeHandle<Code>();
- info->SetStrictMode(info->function()->strict_mode());
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ FunctionLiteral* lit = info->function();
+ shared->set_strict_mode(lit->strict_mode());
+ SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+ shared->set_bailout_reason(lit->dont_optimize_reason());
+ shared->set_ast_node_count(lit->ast_node_count());
+ // Compile unoptimized code.
if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
- Compiler::RecordFunctionCompilation(
- Logger::LAZY_COMPILE_TAG, info, info->shared_info());
- UpdateSharedFunctionInfo(info);
- DCHECK_EQ(Code::FUNCTION, info->code()->kind());
+
+ CHECK_EQ(Code::FUNCTION, info->code()->kind());
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+
+ // Update the shared function info with the scope info. Allocating the
+ // ScopeInfo object may cause a GC.
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*scope_info);
+
+ // Update the code and feedback vector for the shared function info.
+ shared->ReplaceCode(*info->code());
+ if (shared->optimization_disabled()) info->code()->set_optimizable(false);
+ shared->set_feedback_vector(*info->feedback_vector());
+
return info->code();
}
+MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
+ Handle<JSFunction> function, BailoutId osr_ast_id) {
+ if (FLAG_cache_optimized_code) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ // Bound functions are not cached.
+ if (shared->bound()) return MaybeHandle<Code>();
+ DisallowHeapAllocation no_gc;
+ int index = shared->SearchOptimizedCodeMap(
+ function->context()->native_context(), osr_ast_id);
+ if (index > 0) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ if (!osr_ast_id.IsNone()) {
+ PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+ }
+ PrintF("]\n");
+ }
+ FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
+ if (literals != NULL) function->set_literals(literals);
+ return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
+ }
+ }
+ return MaybeHandle<Code>();
+}
+
+
+static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+ Handle<Code> code = info->code();
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
+
+ // Context specialization folds-in the context, so no sharing can occur.
+ if (code->is_turbofanned() && info->is_context_specializing()) return;
+
+ // Cache optimized code.
+ if (FLAG_cache_optimized_code) {
+ Handle<JSFunction> function = info->closure();
+ Handle<SharedFunctionInfo> shared(function->shared());
+ // Do not cache bound functions.
+ if (shared->bound()) return;
+ Handle<FixedArray> literals(function->literals());
+ Handle<Context> native_context(function->context()->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+ literals, info->osr_ast_id());
+ }
+}
+
+
+static bool CompileOptimizedPrologue(CompilationInfo* info) {
+ if (!Parser::Parse(info)) return false;
+ if (!Rewriter::Rewrite(info)) return false;
+ if (!Scope::Analyze(info)) return false;
+ DCHECK(info->scope() != NULL);
+ return true;
+}
+
+
+static bool GetOptimizedCodeNow(CompilationInfo* info) {
+ if (!CompileOptimizedPrologue(info)) return false;
+
+ TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+
+ OptimizedCompileJob job(info);
+ if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
+ job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED ||
+ job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+ if (FLAG_trace_opt) {
+ PrintF("[aborted optimizing ");
+ info->closure()->ShortPrint();
+ PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
+ }
+ return false;
+ }
+
+ // Success!
+ DCHECK(!info->isolate()->has_pending_exception());
+ InsertCodeIntoOptimizedCodeMap(info);
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
+ info->shared_info());
+ if (FLAG_trace_opt) {
+ PrintF("[completed optimizing ");
+ info->closure()->ShortPrint();
+ PrintF("]\n");
+ }
+ return true;
+}
+
+
+static bool GetOptimizedCodeLater(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Compilation queue full, will retry optimizing ");
+ info->closure()->ShortPrint();
+ PrintF(" later.\n");
+ }
+ return false;
+ }
+
+ CompilationHandleScope handle_scope(info);
+ if (!CompileOptimizedPrologue(info)) return false;
+ info->SaveHandles(); // Copy handles to the compilation handle scope.
+
+ TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+
+ OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
+ OptimizedCompileJob::Status status = job->CreateGraph();
+ if (status != OptimizedCompileJob::SUCCEEDED) return false;
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
+
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queued ");
+ info->closure()->ShortPrint();
+ if (info->is_osr()) {
+ PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
+ } else {
+ PrintF(" for concurrent optimization.\n");
+ }
+ }
+ return true;
+}
+
+
MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
DCHECK(!function->GetIsolate()->has_pending_exception());
DCHECK(!function->is_compiled());
@@ -709,6 +814,38 @@ MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
GetUnoptimizedCodeCommon(&info),
Code);
+ return result;
+}
+
+
+MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
+ DCHECK(!function->GetIsolate()->has_pending_exception());
+ DCHECK(!function->is_compiled());
+
+ if (FLAG_turbo_asm && function->shared()->asm_function()) {
+ CompilationInfoWithZone info(function);
+
+ VMState<COMPILER> state(info.isolate());
+ PostponeInterruptsScope postpone(info.isolate());
+
+ info.SetOptimizing(BailoutId::None(),
+ Handle<Code>(function->shared()->code()));
+
+ info.MarkAsContextSpecializing();
+ info.MarkAsTypingEnabled();
+ info.MarkAsInliningDisabled();
+
+ if (GetOptimizedCodeNow(&info)) return info.code();
+ }
+
+ if (function->shared()->is_compiled()) {
+ return Handle<Code>(function->shared()->code());
+ }
+
+ CompilationInfoWithZone info(function);
+ Handle<Code> result;
+ ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
+ GetUnoptimizedCodeCommon(&info), Code);
if (FLAG_always_opt &&
info.isolate()->use_crankshaft() &&
@@ -739,7 +876,7 @@ MaybeHandle<Code> Compiler::GetUnoptimizedCode(
bool Compiler::EnsureCompiled(Handle<JSFunction> function,
ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
- MaybeHandle<Code> maybe_code = Compiler::GetUnoptimizedCode(function);
+ MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
if (flag == CLEAR_EXCEPTION) {
@@ -753,6 +890,38 @@ bool Compiler::EnsureCompiled(Handle<JSFunction> function,
}
+// TODO(turbofan): In the future, unoptimized code with deopt support could
+// be generated lazily once deopt is triggered.
+bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
+ if (!info->shared_info()->has_deoptimization_support()) {
+ CompilationInfoWithZone unoptimized(info->shared_info());
+ // Note that we use the same AST that we will use for generating the
+ // optimized code.
+ unoptimized.SetFunction(info->function());
+ unoptimized.PrepareForCompilation(info->scope());
+ unoptimized.SetContext(info->context());
+ unoptimized.EnableDeoptimizationSupport();
+ if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ shared->EnableDeoptimizationSupport(*unoptimized.code());
+ shared->set_feedback_vector(*unoptimized.feedback_vector());
+
+ // The scope info might not have been set if a lazily compiled
+ // function is inlined before being called for the first time.
+ if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
+ Handle<ScopeInfo> target_scope_info =
+ ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*target_scope_info);
+ }
+
+ // The existing unoptimized code was replaced with the new one.
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+ }
+ return true;
+}
+
+
// Compile full code for debugging. This code will have debug break slots
// and deoptimization information. Deoptimization information is required
// in case that an optimized version of this function is still activated on
@@ -762,7 +931,7 @@ bool Compiler::EnsureCompiled(Handle<JSFunction> function,
// full code without debug break slots to full code with debug break slots
// depends on the generated code is otherwise exactly the same.
// If compilation fails, just keep the existing code.
-MaybeHandle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) {
+MaybeHandle<Code> Compiler::GetDebugCode(Handle<JSFunction> function) {
CompilationInfoWithZone info(function);
Isolate* isolate = info.isolate();
VMState<COMPILER> state(isolate);
@@ -800,7 +969,6 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
info.MarkAsGlobal();
if (!Parser::Parse(&info)) return;
- info.SetStrictMode(info.function()->strict_mode());
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (!CompileUnoptimizedCode(&info)) return;
@@ -813,13 +981,6 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
}
-static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
- bool allow_lazy_without_ctx = false) {
- return LiveEditFunctionTracker::IsActive(info->isolate()) ||
- (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
-}
-
-
static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@@ -834,28 +995,32 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
DCHECK(info->is_eval() || info->is_global());
- bool parse_allow_lazy =
- (info->compile_options() == ScriptCompiler::kConsumeParserCache ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) &&
- !DebuggerWantsEagerCompilation(info);
-
- if (!parse_allow_lazy &&
- (info->compile_options() == ScriptCompiler::kProduceParserCache ||
- info->compile_options() == ScriptCompiler::kConsumeParserCache)) {
- // We are going to parse eagerly, but we either 1) have cached data produced
- // by lazy parsing or 2) are asked to generate cached data. We cannot use
- // the existing data, since it won't contain all the symbols we need for
- // eager parsing. In addition, it doesn't make sense to produce the data
- // when parsing eagerly. That data would contain all symbols, but no
- // functions, so it cannot be used to aid lazy parsing later.
- info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions);
- }
+ info->MarkAsToplevel();
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
- if (!Parser::Parse(info, parse_allow_lazy)) {
- return Handle<SharedFunctionInfo>::null();
+ if (info->function() == NULL) {
+ // Parse the script if needed (if it's already parsed, function() is
+ // non-NULL).
+ bool parse_allow_lazy =
+ (info->compile_options() == ScriptCompiler::kConsumeParserCache ||
+ String::cast(script->source())->length() >
+ FLAG_min_preparse_length) &&
+ !Compiler::DebuggerWantsEagerCompilation(info);
+
+ if (!parse_allow_lazy &&
+ (info->compile_options() == ScriptCompiler::kProduceParserCache ||
+ info->compile_options() == ScriptCompiler::kConsumeParserCache)) {
+ // We are going to parse eagerly, but we either 1) have cached data
+ // produced by lazy parsing or 2) are asked to generate cached data.
+ // Eager parsing cannot benefit from cached data, and producing cached
+ // data while parsing eagerly is not implemented.
+ info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions);
+ }
+ if (!Parser::Parse(info, parse_allow_lazy)) {
+ return Handle<SharedFunctionInfo>::null();
+ }
}
FunctionLiteral* lit = info->function();
@@ -877,9 +1042,8 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
// Allocate function.
DCHECK(!info->code().is_null());
result = isolate->factory()->NewSharedFunctionInfo(
- lit->name(), lit->materialized_literal_count(), lit->is_generator(),
- lit->is_arrow(), info->code(),
- ScopeInfo::Create(info->scope(), info->zone()),
+ lit->name(), lit->materialized_literal_count(), lit->kind(),
+ info->code(), ScopeInfo::Create(info->scope(), info->zone()),
info->feedback_vector());
DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
@@ -902,7 +1066,8 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
SetExpectedNofPropertiesFromEstimate(result,
lit->expected_property_count());
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ if (!script.is_null())
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
}
@@ -997,6 +1162,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kConsumeCodeCache &&
!isolate->debug()->is_loaded()) {
+ HistogramTimerScope timer(isolate->counters()->compile_deserialize());
return CodeSerializer::Deserialize(isolate, *cached_data, source);
} else {
maybe_result = compilation_cache->LookupScript(
@@ -1043,6 +1209,8 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
compilation_cache->PutScript(source, context, result);
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
+ HistogramTimerScope histogram_timer(
+ isolate->counters()->compile_serialize());
*cached_data = CodeSerializer::Serialize(isolate, result, source);
if (FLAG_profile_deserialization) {
PrintF("[Compiling and serializing %d bytes took %0.3f ms]\n",
@@ -1059,6 +1227,19 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
}
+Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
+ CompilationInfo* info, int source_length) {
+ Isolate* isolate = info->isolate();
+ isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
+
+ if (FLAG_use_strict) info->SetStrictMode(STRICT);
+ // TODO(marja): FLAG_serialize_toplevel is not honoured and won't be; when the
+ // real code caching lands, streaming needs to be adapted to use it.
+ return CompileToplevel(info);
+}
+
+
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
FunctionLiteral* literal, Handle<Script> script,
CompilationInfo* outer_info) {
@@ -1085,10 +1266,17 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
bool allow_lazy = literal->AllowsLazyCompilation() &&
!DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
+
+ if (outer_info->is_toplevel() && outer_info->will_serialize()) {
+ // If the toplevel code is going to be serialized, then the inner
+ // function must be allowed to be compiled lazily.
+ DCHECK(allow_lazy);
+ }
+
// Generate code
Handle<ScopeInfo> scope_info;
if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
- Handle<Code> code = isolate->builtins()->CompileUnoptimized();
+ Handle<Code> code = isolate->builtins()->CompileLazy();
info.SetCode(code);
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
} else if (FullCodeGenerator::MakeCode(&info)) {
@@ -1100,9 +1288,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
// Create a shared function info object.
Handle<SharedFunctionInfo> result = factory->NewSharedFunctionInfo(
- literal->name(), literal->materialized_literal_count(),
- literal->is_generator(), literal->is_arrow(), info.code(), scope_info,
- info.feedback_vector());
+ literal->name(), literal->materialized_literal_count(), literal->kind(),
+ info.code(), scope_info, info.feedback_vector());
SetFunctionInfo(result, literal, false, script);
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
@@ -1117,120 +1304,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
}
-MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
- Handle<JSFunction> function,
- BailoutId osr_ast_id) {
- if (FLAG_cache_optimized_code) {
- Handle<SharedFunctionInfo> shared(function->shared());
- // Bound functions are not cached.
- if (shared->bound()) return MaybeHandle<Code>();
- DisallowHeapAllocation no_gc;
- int index = shared->SearchOptimizedCodeMap(
- function->context()->native_context(), osr_ast_id);
- if (index > 0) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- if (!osr_ast_id.IsNone()) {
- PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
- }
- PrintF("]\n");
- }
- FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
- if (literals != NULL) function->set_literals(literals);
- return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
- }
- }
- return MaybeHandle<Code>();
-}
-
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
- Handle<Code> code = info->code();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
-
- // Context specialization folds-in the context, so no sharing can occur.
- if (code->is_turbofanned() && FLAG_context_specialization) return;
-
- // Cache optimized code.
- if (FLAG_cache_optimized_code) {
- Handle<JSFunction> function = info->closure();
- Handle<SharedFunctionInfo> shared(function->shared());
- // Do not cache bound functions.
- if (shared->bound()) return;
- Handle<FixedArray> literals(function->literals());
- Handle<Context> native_context(function->context()->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(
- shared, native_context, code, literals, info->osr_ast_id());
- }
-}
-
-
-static bool CompileOptimizedPrologue(CompilationInfo* info) {
- if (!Parser::Parse(info)) return false;
- info->SetStrictMode(info->function()->strict_mode());
-
- if (!Rewriter::Rewrite(info)) return false;
- if (!Scope::Analyze(info)) return false;
- DCHECK(info->scope() != NULL);
- return true;
-}
-
-
-static bool GetOptimizedCodeNow(CompilationInfo* info) {
- if (!CompileOptimizedPrologue(info)) return false;
-
- TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-
- OptimizedCompileJob job(info);
- if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
- if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
- if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
-
- // Success!
- DCHECK(!info->isolate()->has_pending_exception());
- InsertCodeIntoOptimizedCodeMap(info);
- Compiler::RecordFunctionCompilation(
- Logger::LAZY_COMPILE_TAG, info, info->shared_info());
- return true;
-}
-
-
-static bool GetOptimizedCodeLater(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
- if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Compilation queue full, will retry optimizing ");
- info->closure()->PrintName();
- PrintF(" later.\n");
- }
- return false;
- }
-
- CompilationHandleScope handle_scope(info);
- if (!CompileOptimizedPrologue(info)) return false;
- info->SaveHandles(); // Copy handles to the compilation handle scope.
-
- TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-
- OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
- OptimizedCompileJob::Status status = job->CreateGraph();
- if (status != OptimizedCompileJob::SUCCEEDED) return false;
- isolate->optimizing_compiler_thread()->QueueForOptimization(job);
-
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Queued ");
- info->closure()->PrintName();
- if (info->is_osr()) {
- PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
- } else {
- PrintF(" for concurrent optimization.\n");
- }
- }
- return true;
-}
-
-
MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
Handle<Code> current_code,
ConcurrencyMode mode,
@@ -1249,9 +1322,17 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
- DCHECK_NE(ScopeInfo::Empty(isolate), shared->scope_info());
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
+ if (shared->code()->kind() != Code::FUNCTION ||
+ ScopeInfo::Empty(isolate) == shared->scope_info()) {
+ // The function was never compiled. Compile it unoptimized first.
+ // TODO(titzer): reuse the AST and scope info from this compile.
+ CompilationInfoWithZone nested(function);
+ nested.EnableDeoptimizationSupport();
+ if (!GetUnoptimizedCodeCommon(&nested).ToHandle(&current_code)) {
+ return MaybeHandle<Code>();
+ }
+ shared->ReplaceCode(*current_code);
+ }
current_code->set_profiler_ticks(0);
info->SetOptimizing(osr_ast_id, current_code);
@@ -1265,13 +1346,6 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
if (GetOptimizedCodeNow(info.get())) return info->code();
}
- // Failed.
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
- }
-
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return MaybeHandle<Code>();
}
@@ -1289,72 +1363,48 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->code()->set_profiler_ticks(0);
- // 1) Optimization may have failed.
+ // 1) Optimization on the concurrent thread may have failed.
// 2) The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
// 3) The code may have already been invalidated due to dependency change.
// 4) Debugger may have been activated.
-
- if (job->last_status() != OptimizedCompileJob::SUCCEEDED ||
- shared->optimization_disabled() ||
- info->HasAbortedDueToDependencyChange() ||
- isolate->DebuggerHasBreakPoints()) {
- return Handle<Code>::null();
- }
-
- if (job->GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
- return Handle<Code>::null();
- }
-
- Compiler::RecordFunctionCompilation(
- Logger::LAZY_COMPILE_TAG, info.get(), shared);
- if (info->shared_info()->SearchOptimizedCodeMap(
- info->context()->native_context(), info->osr_ast_id()) == -1) {
- InsertCodeIntoOptimizedCodeMap(info.get());
+ // 5) Code generation may have failed.
+ if (job->last_status() == OptimizedCompileJob::SUCCEEDED) {
+ if (shared->optimization_disabled()) {
+ job->RetryOptimization(kOptimizationDisabled);
+ } else if (info->HasAbortedDueToDependencyChange()) {
+ job->RetryOptimization(kBailedOutDueToDependencyChange);
+ } else if (isolate->DebuggerHasBreakPoints()) {
+ job->RetryOptimization(kDebuggerHasBreakPoints);
+ } else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
+ if (info->shared_info()->SearchOptimizedCodeMap(
+ info->context()->native_context(), info->osr_ast_id()) == -1) {
+ InsertCodeIntoOptimizedCodeMap(info.get());
+ }
+ if (FLAG_trace_opt) {
+ PrintF("[completed optimizing ");
+ info->closure()->ShortPrint();
+ PrintF("]\n");
+ }
+ return Handle<Code>(*info->code());
+ }
}
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Optimized code for ");
- info->closure()->PrintName();
- PrintF(" generated.\n");
+ DCHECK(job->last_status() != OptimizedCompileJob::SUCCEEDED);
+ if (FLAG_trace_opt) {
+ PrintF("[aborted optimizing ");
+ info->closure()->ShortPrint();
+ PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
}
-
- return Handle<Code>(*info->code());
+ return Handle<Code>::null();
}
-void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
- // SharedFunctionInfo is passed separately, because if CompilationInfo
- // was created using Script object, it will not have it.
-
- // Log the code generation. If source information is available include
- // script name and line number. Check explicitly whether logging is
- // enabled as finding the line number is not free.
- if (info->isolate()->logger()->is_logging_code_events() ||
- info->isolate()->cpu_profiler()->is_profiling()) {
- Handle<Script> script = info->script();
- Handle<Code> code = info->code();
- if (code.is_identical_to(
- info->isolate()->builtins()->CompileUnoptimized())) {
- return;
- }
- int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
- int column_num =
- Script::GetColumnNumber(script, shared->start_position()) + 1;
- String* script_name = script->name()->IsString()
- ? String::cast(script->name())
- : info->isolate()->heap()->empty_string();
- Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
- PROFILE(info->isolate(), CodeCreateEvent(
- log_tag, *code, *shared, info, script_name, line_num, column_num));
- }
-
- GDBJIT(AddCode(Handle<String>(shared->DebugName()),
- Handle<Script>(info->script()),
- Handle<Code>(info->code()),
- info));
+bool Compiler::DebuggerWantsEagerCompilation(CompilationInfo* info,
+ bool allow_lazy_without_ctx) {
+ return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+ (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index e8beca5a05..e9176d322d 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/ast.h"
+#include "src/bailout-reason.h"
#include "src/zone.h"
namespace v8 {
@@ -57,11 +58,37 @@ class ScriptData {
DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
-
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
public:
+ // Various configuration flags for a compilation, as well as some properties
+ // of the compiled code produced by a compilation.
+ enum Flag {
+ kLazy = 1 << 0,
+ kEval = 1 << 1,
+ kGlobal = 1 << 2,
+ kStrictMode = 1 << 3,
+ kThisHasUses = 1 << 4,
+ kNative = 1 << 5,
+ kDeferredCalling = 1 << 6,
+ kNonDeferredCalling = 1 << 7,
+ kSavesCallerDoubles = 1 << 8,
+ kRequiresFrame = 1 << 9,
+ kMustNotHaveEagerFrame = 1 << 10,
+ kDeoptimizationSupport = 1 << 11,
+ kDebug = 1 << 12,
+ kCompilingForDebugging = 1 << 13,
+ kParseRestriction = 1 << 14,
+ kSerializing = 1 << 15,
+ kContextSpecializing = 1 << 16,
+ kInliningEnabled = 1 << 17,
+ kTypingEnabled = 1 << 18,
+ kDisableFutureOptimization = 1 << 19,
+ kAbortedDueToDependency = 1 << 20,
+ kToplevel = 1 << 21
+ };
+
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
CompilationInfo(Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
@@ -71,10 +98,12 @@ class CompilationInfo {
}
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
- bool is_lazy() const { return IsLazy::decode(flags_); }
- bool is_eval() const { return IsEval::decode(flags_); }
- bool is_global() const { return IsGlobal::decode(flags_); }
- StrictMode strict_mode() const { return StrictModeField::decode(flags_); }
+ bool is_lazy() const { return GetFlag(kLazy); }
+ bool is_eval() const { return GetFlag(kEval); }
+ bool is_global() const { return GetFlag(kGlobal); }
+ StrictMode strict_mode() const {
+ return GetFlag(kStrictMode) ? STRICT : SLOPPY;
+ }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
Scope* global_scope() const { return global_scope_; }
@@ -82,12 +111,19 @@ class CompilationInfo {
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
+ void set_script(Handle<Script> script) { script_ = script; }
HydrogenCodeStub* code_stub() const {return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptData** cached_data() const { return cached_data_; }
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
}
+ ScriptCompiler::ExternalSourceStream* source_stream() const {
+ return source_stream_;
+ }
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding() const {
+ return source_stream_encoding_;
+ }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
@@ -98,12 +134,12 @@ class CompilationInfo {
void MarkAsEval() {
DCHECK(!is_lazy());
- flags_ |= IsEval::encode(true);
+ SetFlag(kEval);
}
void MarkAsGlobal() {
DCHECK(!is_lazy());
- flags_ |= IsGlobal::encode(true);
+ SetFlag(kGlobal);
}
void set_parameter_count(int parameter_count) {
@@ -112,83 +148,70 @@ class CompilationInfo {
}
void set_this_has_uses(bool has_no_uses) {
- this_has_uses_ = has_no_uses;
+ SetFlag(kThisHasUses, has_no_uses);
}
- bool this_has_uses() {
- return this_has_uses_;
- }
+ bool this_has_uses() { return GetFlag(kThisHasUses); }
void SetStrictMode(StrictMode strict_mode) {
- DCHECK(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
- flags_ = StrictModeField::update(flags_, strict_mode);
+ SetFlag(kStrictMode, strict_mode == STRICT);
}
- void MarkAsNative() {
- flags_ |= IsNative::encode(true);
- }
+ void MarkAsNative() { SetFlag(kNative); }
- bool is_native() const {
- return IsNative::decode(flags_);
- }
+ bool is_native() const { return GetFlag(kNative); }
bool is_calling() const {
- return is_deferred_calling() || is_non_deferred_calling();
+ return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
}
- void MarkAsDeferredCalling() {
- flags_ |= IsDeferredCalling::encode(true);
- }
+ void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
- bool is_deferred_calling() const {
- return IsDeferredCalling::decode(flags_);
- }
+ bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
- void MarkAsNonDeferredCalling() {
- flags_ |= IsNonDeferredCalling::encode(true);
- }
+ void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
- bool is_non_deferred_calling() const {
- return IsNonDeferredCalling::decode(flags_);
- }
+ bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
- void MarkAsSavesCallerDoubles() {
- flags_ |= SavesCallerDoubles::encode(true);
- }
+ void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
- bool saves_caller_doubles() const {
- return SavesCallerDoubles::decode(flags_);
- }
+ bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
- void MarkAsRequiresFrame() {
- flags_ |= RequiresFrame::encode(true);
- }
+ void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
- bool requires_frame() const {
- return RequiresFrame::decode(flags_);
- }
+ bool requires_frame() const { return GetFlag(kRequiresFrame); }
- void MarkMustNotHaveEagerFrame() {
- flags_ |= MustNotHaveEagerFrame::encode(true);
- }
+ void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
bool GetMustNotHaveEagerFrame() const {
- return MustNotHaveEagerFrame::decode(flags_);
+ return GetFlag(kMustNotHaveEagerFrame);
}
- void MarkAsDebug() {
- flags_ |= IsDebug::encode(true);
- }
+ void MarkAsDebug() { SetFlag(kDebug); }
- bool is_debug() const {
- return IsDebug::decode(flags_);
- }
+ bool is_debug() const { return GetFlag(kDebug); }
- void PrepareForSerializing() {
- flags_ |= PrepareForSerializing::encode(true);
- }
+ void PrepareForSerializing() { SetFlag(kSerializing); }
+
+ bool will_serialize() const { return GetFlag(kSerializing); }
+
+ void MarkAsContextSpecializing() { SetFlag(kContextSpecializing); }
+
+ bool is_context_specializing() const { return GetFlag(kContextSpecializing); }
+
+ void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
- bool will_serialize() const { return PrepareForSerializing::decode(flags_); }
+ void MarkAsInliningDisabled() { SetFlag(kInliningEnabled, false); }
+
+ bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
+
+ void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
+
+ bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
+
+ void MarkAsToplevel() { SetFlag(kToplevel); }
+
+ bool is_toplevel() const { return GetFlag(kToplevel); }
bool IsCodePreAgingActive() const {
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
@@ -196,11 +219,12 @@ class CompilationInfo {
}
void SetParseRestriction(ParseRestriction restriction) {
- flags_ = ParseRestricitonField::update(flags_, restriction);
+ SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
}
ParseRestriction parse_restriction() const {
- return ParseRestricitonField::decode(flags_);
+ return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
+ : NO_PARSE_RESTRICTION;
}
void SetFunction(FunctionLiteral* literal) {
@@ -212,7 +236,7 @@ class CompilationInfo {
DCHECK(global_scope_ == NULL);
global_scope_ = global_scope;
}
- Handle<FixedArray> feedback_vector() const {
+ Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
void SetCode(Handle<Code> code) { code_ = code; }
@@ -234,12 +258,8 @@ class CompilationInfo {
context_ = context;
}
- void MarkCompilingForDebugging() {
- flags_ |= IsCompilingForDebugging::encode(true);
- }
- bool IsCompilingForDebugging() {
- return IsCompilingForDebugging::decode(flags_);
- }
+ void MarkCompilingForDebugging() { SetFlag(kCompilingForDebugging); }
+ bool IsCompilingForDebugging() { return GetFlag(kCompilingForDebugging); }
void MarkNonOptimizable() {
SetMode(CompilationInfo::NONOPT);
}
@@ -269,15 +289,14 @@ class CompilationInfo {
unoptimized_code_ = unoptimized;
optimization_id_ = isolate()->NextOptimizationId();
}
- void DisableOptimization();
// Deoptimization support.
bool HasDeoptimizationSupport() const {
- return SupportsDeoptimization::decode(flags_);
+ return GetFlag(kDeoptimizationSupport);
}
void EnableDeoptimizationSupport() {
DCHECK(IsOptimizable());
- flags_ |= SupportsDeoptimization::encode(true);
+ SetFlag(kDeoptimizationSupport);
}
// Determines whether or not to insert a self-optimization header.
@@ -308,8 +327,16 @@ class CompilationInfo {
SaveHandle(&unoptimized_code_);
}
+ void AbortOptimization(BailoutReason reason) {
+ if (bailout_reason_ != kNoReason) bailout_reason_ = reason;
+ SetFlag(kDisableFutureOptimization);
+ }
+
+ void RetryOptimization(BailoutReason reason) {
+ if (bailout_reason_ != kNoReason) bailout_reason_ = reason;
+ }
+
BailoutReason bailout_reason() const { return bailout_reason_; }
- void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
int prologue_offset() const {
DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
@@ -344,12 +371,12 @@ class CompilationInfo {
void AbortDueToDependencyChange() {
DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
- abort_due_to_dependency_ = true;
+ SetFlag(kAbortedDueToDependency);
}
- bool HasAbortedDueToDependencyChange() {
+ bool HasAbortedDueToDependencyChange() const {
DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
- return abort_due_to_dependency_;
+ return GetFlag(kAbortedDueToDependency);
}
bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
@@ -365,6 +392,8 @@ class CompilationInfo {
ast_value_factory_owned_ = owned;
}
+ AstNode::IdGen* ast_node_id_gen() { return &ast_node_id_gen_; }
+
protected:
CompilationInfo(Handle<Script> script,
Zone* zone);
@@ -373,6 +402,10 @@ class CompilationInfo {
CompilationInfo(HydrogenCodeStub* stub,
Isolate* isolate,
Zone* zone);
+ CompilationInfo(ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding,
+ Isolate* isolate, Zone* zone);
+
private:
Isolate* isolate_;
@@ -395,41 +428,13 @@ class CompilationInfo {
mode_ = mode;
}
- // Flags using template class BitField<type, start, length>. All are
- // false by default.
- //
- // Compilation is either eager or lazy.
- class IsLazy: public BitField<bool, 0, 1> {};
- // Flags that can be set for eager compilation.
- class IsEval: public BitField<bool, 1, 1> {};
- class IsGlobal: public BitField<bool, 2, 1> {};
- // If the function is being compiled for the debugger.
- class IsDebug: public BitField<bool, 3, 1> {};
- // Strict mode - used in eager compilation.
- class StrictModeField: public BitField<StrictMode, 4, 1> {};
- // Is this a function from our natives.
- class IsNative: public BitField<bool, 5, 1> {};
- // Is this code being compiled with support for deoptimization..
- class SupportsDeoptimization: public BitField<bool, 6, 1> {};
- // If compiling for debugging produce just full code matching the
- // initial mode setting.
- class IsCompilingForDebugging: public BitField<bool, 7, 1> {};
- // If the compiled code contains calls that require building a frame
- class IsCalling: public BitField<bool, 8, 1> {};
- // If the compiled code contains calls that require building a frame
- class IsDeferredCalling: public BitField<bool, 9, 1> {};
- // If the compiled code contains calls that require building a frame
- class IsNonDeferredCalling: public BitField<bool, 10, 1> {};
- // If the compiled code saves double caller registers that it clobbers.
- class SavesCallerDoubles: public BitField<bool, 11, 1> {};
- // If the set of valid statements is restricted.
- class ParseRestricitonField: public BitField<ParseRestriction, 12, 1> {};
- // If the function requires a frame (for unspecified reasons)
- class RequiresFrame: public BitField<bool, 13, 1> {};
- // If the function cannot build a frame (for unspecified reasons)
- class MustNotHaveEagerFrame: public BitField<bool, 14, 1> {};
- // If we plan to serialize the compiled code.
- class PrepareForSerializing : public BitField<bool, 15, 1> {};
+ void SetFlag(Flag flag) { flags_ |= flag; }
+
+ void SetFlag(Flag flag, bool value) {
+ flags_ = value ? flags_ | flag : flags_ & ~flag;
+ }
+
+ bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
unsigned flags_;
@@ -450,6 +455,8 @@ class CompilationInfo {
Handle<JSFunction> closure_;
Handle<SharedFunctionInfo> shared_info_;
Handle<Script> script_;
+ ScriptCompiler::ExternalSourceStream* source_stream_; // Not owned.
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
// Fields possibly needed for eager compilation, NULL by default.
v8::Extension* extension_;
@@ -461,7 +468,7 @@ class CompilationInfo {
Handle<Context> context_;
// Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
- Handle<FixedArray> feedback_vector_;
+ Handle<TypeFeedbackVector> feedback_vector_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
@@ -471,9 +478,6 @@ class CompilationInfo {
// data. Keep track which code we patched.
Handle<Code> unoptimized_code_;
- // Flag whether compilation needs to be aborted due to dependency change.
- bool abort_due_to_dependency_;
-
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
@@ -503,14 +507,13 @@ class CompilationInfo {
// Number of parameters used for compilation of stubs that require arguments.
int parameter_count_;
- bool this_has_uses_;
-
Handle<Foreign> object_wrapper_;
int optimization_id_;
AstValueFactory* ast_value_factory_;
bool ast_value_factory_owned_;
+ AstNode::IdGen ast_node_id_gen_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -532,6 +535,10 @@ class CompilationInfoWithZone: public CompilationInfo {
CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_),
zone_(isolate) {}
+ CompilationInfoWithZone(ScriptCompiler::ExternalSourceStream* stream,
+ ScriptCompiler::StreamedSource::Encoding encoding,
+ Isolate* isolate)
+ : CompilationInfo(stream, encoding, isolate, &zone_), zone_(isolate) {}
// Virtual destructor because a CompilationInfoWithZone has to exit the
// zone scope and get rid of dependent maps even when the destructor is
@@ -594,18 +601,13 @@ class OptimizedCompileJob: public ZoneObject {
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
- MUST_USE_RESULT Status AbortOptimization(
- BailoutReason reason = kNoReason) {
- if (reason != kNoReason) info_->set_bailout_reason(reason);
+ Status RetryOptimization(BailoutReason reason) {
+ info_->RetryOptimization(reason);
return SetLastStatus(BAILED_OUT);
}
- MUST_USE_RESULT Status AbortAndDisableOptimization(
- BailoutReason reason = kNoReason) {
- if (reason != kNoReason) info_->set_bailout_reason(reason);
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- info_->shared_info()->DisableOptimization(info_->bailout_reason());
+ Status AbortOptimization(BailoutReason reason) {
+ info_->AbortOptimization(reason);
return SetLastStatus(BAILED_OUT);
}
@@ -666,12 +668,17 @@ class Compiler : public AllStatic {
public:
MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
Handle<JSFunction> function);
+ MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
+ Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
Handle<SharedFunctionInfo> shared);
+ MUST_USE_RESULT static MaybeHandle<Code> GetDebugCode(
+ Handle<JSFunction> function);
+
static bool EnsureCompiled(Handle<JSFunction> function,
ClearExceptionFlag flag);
- MUST_USE_RESULT static MaybeHandle<Code> GetCodeForDebugging(
- Handle<JSFunction> function);
+
+ static bool EnsureDeoptimizationSupport(CompilationInfo* info);
static void CompileForLiveEdit(Handle<Script> script);
@@ -691,6 +698,9 @@ class Compiler : public AllStatic {
ScriptCompiler::CompileOptions compile_options,
NativesFlag is_natives_code);
+ static Handle<SharedFunctionInfo> CompileStreamedScript(CompilationInfo* info,
+ int source_length);
+
// Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
Handle<Script> script,
@@ -711,9 +721,8 @@ class Compiler : public AllStatic {
// On failure, return the empty handle.
static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
- static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared);
+ static bool DebuggerWantsEagerCompilation(
+ CompilationInfo* info, bool allow_lazy_without_ctx = false);
};
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
new file mode 100644
index 0000000000..749c04a0c5
--- /dev/null
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -0,0 +1,90 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// static
+FieldAccess AccessBuilder::ForMap() {
+ return {kTaggedBase, HeapObject::kMapOffset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectProperties() {
+ return {kTaggedBase, JSObject::kPropertiesOffset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectElements() {
+ return {kTaggedBase, JSObject::kElementsOffset, Handle<Name>(),
+ Type::Internal(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionContext() {
+ return {kTaggedBase, JSFunction::kContextOffset, Handle<Name>(),
+ Type::Internal(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
+ return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, Handle<Name>(),
+ Type::UntaggedPtr(), kMachPtr};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForExternalArrayPointer() {
+ return {kTaggedBase, ExternalArray::kExternalPointerOffset, Handle<Name>(),
+ Type::UntaggedPtr(), kMachPtr};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForFixedArrayElement() {
+ return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+ bool is_external) {
+ BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
+ int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
+ switch (type) {
+ case kExternalInt8Array:
+ return {taggedness, header_size, Type::Signed32(), kMachInt8};
+ case kExternalUint8Array:
+ case kExternalUint8ClampedArray:
+ return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
+ case kExternalInt16Array:
+ return {taggedness, header_size, Type::Signed32(), kMachInt16};
+ case kExternalUint16Array:
+ return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
+ case kExternalInt32Array:
+ return {taggedness, header_size, Type::Signed32(), kMachInt32};
+ case kExternalUint32Array:
+ return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
+ case kExternalFloat32Array:
+ return {taggedness, header_size, Type::Number(), kRepFloat32};
+ case kExternalFloat64Array:
+ return {taggedness, header_size, Type::Number(), kRepFloat64};
+ }
+ UNREACHABLE();
+ return {kUntaggedBase, 0, Type::None(), kMachNone};
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
new file mode 100644
index 0000000000..72dd0234aa
--- /dev/null
+++ b/deps/v8/src/compiler/access-builder.h
@@ -0,0 +1,52 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ACCESS_BUILDER_H_
+#define V8_COMPILER_ACCESS_BUILDER_H_
+
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// This access builder provides a set of static methods constructing commonly
+// used FieldAccess and ElementAccess descriptors. These descriptors serve as
+// parameters to simplified load/store operators.
+class AccessBuilder FINAL : public AllStatic {
+ public:
+ // Provides access to HeapObject::map() field.
+ static FieldAccess ForMap();
+
+ // Provides access to JSObject::properties() field.
+ static FieldAccess ForJSObjectProperties();
+
+ // Provides access to JSObject::elements() field.
+ static FieldAccess ForJSObjectElements();
+
+ // Provides access to JSFunction::context() field.
+ static FieldAccess ForJSFunctionContext();
+
+ // Provides access to JSArrayBuffer::backing_store() field.
+ static FieldAccess ForJSArrayBufferBackingStore();
+
+ // Provides access to ExternalArray::external_pointer() field.
+ static FieldAccess ForExternalArrayPointer();
+
+ // Provides access to FixedArray elements.
+ static ElementAccess ForFixedArrayElement();
+
+ // Provides access to Fixed{type}TypedArray and External{type}Array elements.
+ static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+ bool is_external);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ACCESS_BUILDER_H_
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 90eb7cd4dd..fabcfdcdc5 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -22,11 +22,35 @@ namespace compiler {
// Adds Arm-specific methods to convert InstructionOperands.
-class ArmOperandConverter : public InstructionOperandConverter {
+class ArmOperandConverter FINAL : public InstructionOperandConverter {
public:
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
+ SwVfpRegister OutputFloat32Register(int index = 0) {
+ return ToFloat32Register(instr_->OutputAt(index));
+ }
+
+ SwVfpRegister InputFloat32Register(int index) {
+ return ToFloat32Register(instr_->InputAt(index));
+ }
+
+ SwVfpRegister ToFloat32Register(InstructionOperand* op) {
+ return ToFloat64Register(op).low();
+ }
+
+ LowDwVfpRegister OutputFloat64Register(int index = 0) {
+ return ToFloat64Register(instr_->OutputAt(index));
+ }
+
+ LowDwVfpRegister InputFloat64Register(int index) {
+ return ToFloat64Register(instr_->InputAt(index));
+ }
+
+ LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
+ return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
+ }
+
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
@@ -44,6 +68,9 @@ class ArmOperandConverter : public InstructionOperandConverter {
switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
@@ -136,6 +163,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ add(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+ }
+ AddSafepointAndDeopt(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+ AddSafepointAndDeopt(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArchJmp:
__ b(code_->GetLabel(i.InputBlock(0)));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -148,16 +204,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleReturn();
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchDeoptimize: {
- int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
-
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
- __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -198,8 +248,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArmMov:
- __ Move(i.OutputRegister(), i.InputOperand2(0));
- DCHECK_EQ(LeaveCC, i.OutputSBit());
+ __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
break;
case kArmMvn:
__ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
@@ -233,57 +282,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmCallCodeObject: {
- if (instr->InputAt(0)->IsImmediate()) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ Call(code, RelocInfo::CODE_TARGET);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- } else {
- Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ ldr(reg, MemOperand(reg, entry));
- __ Call(reg);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- }
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
- case kArmCallJSFunction: {
- Register func = i.InputRegister(0);
-
- // TODO(jarin) The load of the context should be separated from the call.
- __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
- __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
- __ Call(ip);
-
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
- case kArmCallAddress: {
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm(), i.InputRegister(0));
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
- case kArmPush:
- __ Push(i.InputRegister(0));
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- case kArmDrop: {
- int words = MiscField::decode(instr->opcode());
- __ Drop(words);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
case kArmCmp:
__ cmp(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
@@ -301,38 +299,38 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVcmpF64:
- __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
- __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF64:
- __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF64:
- __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF64:
- __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
+ i.InputFloat64Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF64:
- __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
+ i.InputFloat64Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF64:
- __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmodF64: {
@@ -340,88 +338,124 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
- __ MovToFloatParameters(i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ MovToFloatParameters(i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ MovFromFloatResult(i.OutputDoubleRegister());
+ __ MovFromFloatResult(i.OutputFloat64Register());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVsqrtF64:
+ __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVnegF64:
- __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVcvtF32F64: {
+ __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtF64F32: {
+ __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ }
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+ __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+ __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+ __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+ __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmLoadWord8:
+ case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmStoreWord8: {
+ case kArmLdrsb:
+ __ ldrsb(i.OutputRegister(), i.InputOffset());
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmStrb: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ strb(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmLoadWord16:
+ case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
break;
- case kArmStoreWord16: {
+ case kArmLdrsh:
+ __ ldrsh(i.OutputRegister(), i.InputOffset());
+ break;
+ case kArmStrh: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ strh(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmLoadWord32:
+ case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
break;
- case kArmStoreWord32: {
+ case kArmStr: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ str(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmFloat64Load:
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ case kArmVldrF32: {
+ __ vldr(i.OutputFloat32Register(), i.InputOffset());
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVstrF32: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ vstr(i.InputFloat32Register(index), operand);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVldrF64:
+ __ vldr(i.OutputFloat64Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmFloat64Store: {
+ case kArmVstrF64: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
- __ vstr(i.InputDoubleRegister(index), operand);
+ __ vstr(i.InputFloat64Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmPush:
+ __ Push(i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmStoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -600,14 +634,31 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
- __ Push(lr, fp);
- __ mov(fp, sp);
+ bool saved_pp;
+ if (FLAG_enable_ool_constant_pool) {
+ __ Push(lr, fp, pp);
+ // Adjust FP to point to saved FP.
+ __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ saved_pp = true;
+ } else {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ saved_pp = false;
+ }
const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- int register_save_area_size = 0;
+ if (saves != 0 || saved_pp) {
+ // Save callee-saved registers.
+ int register_save_area_size = saved_pp ? kPointerSize : 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
register_save_area_size += kPointerSize;
@@ -665,14 +716,13 @@ void CodeGenerator::AssembleReturn() {
__ ldm(ia_w, sp, saves);
}
}
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ LeaveFrame(StackFrame::MANUAL);
__ Ret();
} else {
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int pop_count =
- descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ LeaveFrame(StackFrame::MANUAL);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
__ Drop(pop_count);
__ Ret();
}
@@ -703,10 +753,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ str(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
- Constant src = g.ToConstant(source);
switch (src.type()) {
case Constant::kInt32:
__ mov(dst, Operand(src.ToInt32()));
@@ -714,6 +764,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kInt64:
UNREACHABLE();
break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
case Constant::kFloat64:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
@@ -726,14 +780,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
- } else if (destination->IsDoubleRegister()) {
- DwVfpRegister result = g.ToDoubleRegister(destination);
- __ vmov(result, g.ToDouble(source));
+ } else if (src.type() == Constant::kFloat32) {
+ SwVfpRegister dst = destination->IsDoubleRegister()
+ ? g.ToFloat32Register(destination)
+ : kScratchDoubleReg.low();
+ // TODO(turbofan): Can we do better here?
+ __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ vmov(dst, ip);
+ if (destination->IsDoubleStackSlot()) {
+ __ vstr(dst, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsDoubleStackSlot());
- DwVfpRegister temp = kScratchDoubleReg;
- __ vmov(temp, g.ToDouble(source));
- __ vstr(temp, g.ToMemOperand(destination));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DwVfpRegister dst = destination->IsDoubleRegister()
+ ? g.ToFloat64Register(destination)
+ : kScratchDoubleReg;
+ __ vmov(dst, src.ToFloat64());
+ if (destination->IsDoubleStackSlot()) {
+ __ vstr(dst, g.ToMemOperand(destination));
+ }
}
} else if (source->IsDoubleRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
@@ -798,7 +863,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
- __ Move(src, temp);
+ __ Move(dst, temp);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand dst = g.ToMemOperand(destination);
@@ -829,20 +894,31 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
void CodeGenerator::AddNopForSmiCodeInlining() {
// On 32-bit ARM we do not insert nops for inlined Smi code.
- UNREACHABLE();
}
-#ifdef DEBUG
-// Checks whether the code between start_pc and end_pc is a no-op.
-bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc) {
- return false;
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+ v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
}
-#endif // DEBUG
-
#undef __
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 1d5b5c7f33..8654a2ce6b 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -32,11 +32,6 @@ namespace compiler {
V(ArmMvn) \
V(ArmBfc) \
V(ArmUbfx) \
- V(ArmCallCodeObject) \
- V(ArmCallJSFunction) \
- V(ArmCallAddress) \
- V(ArmPush) \
- V(ArmDrop) \
V(ArmVcmpF64) \
V(ArmVaddF64) \
V(ArmVsubF64) \
@@ -46,18 +41,26 @@ namespace compiler {
V(ArmVdivF64) \
V(ArmVmodF64) \
V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
- V(ArmFloat64Load) \
- V(ArmFloat64Store) \
- V(ArmLoadWord8) \
- V(ArmStoreWord8) \
- V(ArmLoadWord16) \
- V(ArmStoreWord16) \
- V(ArmLoadWord32) \
- V(ArmStoreWord32) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVstrF64) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
V(ArmStoreWriteBarrier)
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/src/compiler/arm/instruction-selector-arm-unittest.cc
new file mode 100644
index 0000000000..ec5eaf3b5f
--- /dev/null
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm-unittest.cc
@@ -0,0 +1,1927 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
+
+
+// Data processing instructions.
+struct DPI {
+ Constructor constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ ArchOpcode reverse_arch_opcode;
+ ArchOpcode test_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const DPI& dpi) {
+ return os << dpi.constructor_name;
+}
+
+
+static const DPI kDPIs[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmAdd, kArmCmn},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
+
+
+// Data processing instructions with overflow.
+struct ODPI {
+ Constructor constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ ArchOpcode reverse_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const ODPI& odpi) {
+ return os << odpi.constructor_name;
+}
+
+
+static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
+ "Int32AddWithOverflow", kArmAdd, kArmAdd},
+ {&RawMachineAssembler::Int32SubWithOverflow,
+ "Int32SubWithOverflow", kArmSub, kArmRsb}};
+
+
+// Shifts.
+struct Shift {
+ Constructor constructor;
+ const char* constructor_name;
+ int32_t i_low; // lowest possible immediate
+ int32_t i_high; // highest possible immediate
+ AddressingMode i_mode; // Operand2_R_<shift>_I
+ AddressingMode r_mode; // Operand2_R_<shift>_R
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+ return os << shift.constructor_name;
+}
+
+
+static const Shift kShifts[] = {
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
+ kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
+ kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
+ kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
+ kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
+
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+ -2147483617, -2147483606, -2113929216, -2080374784, -1996488704,
+ -1879048192, -1459617792, -1358954496, -1342177265, -1275068414,
+ -1073741818, -1073741777, -855638016, -805306368, -402653184,
+ -268435444, -16777216, 0, 35, 61,
+ 105, 116, 171, 245, 255,
+ 692, 1216, 1248, 1520, 1600,
+ 1888, 3744, 4080, 5888, 8384,
+ 9344, 9472, 9792, 13312, 15040,
+ 15360, 20736, 22272, 23296, 32000,
+ 33536, 37120, 45824, 47872, 56320,
+ 59392, 65280, 72704, 101376, 147456,
+ 161792, 164864, 167936, 173056, 195584,
+ 209920, 212992, 356352, 655360, 704512,
+ 716800, 851968, 901120, 1044480, 1523712,
+ 2572288, 3211264, 3588096, 3833856, 3866624,
+ 4325376, 5177344, 6488064, 7012352, 7471104,
+ 14090240, 16711680, 19398656, 22282240, 28573696,
+ 30408704, 30670848, 43253760, 54525952, 55312384,
+ 56623104, 68157440, 115343360, 131072000, 187695104,
+ 188743680, 195035136, 197132288, 203423744, 218103808,
+ 267386880, 268435470, 285212672, 402653185, 415236096,
+ 595591168, 603979776, 603979778, 629145600, 1073741835,
+ 1073741855, 1073741861, 1073741884, 1157627904, 1476395008,
+ 1476395010, 1610612741, 2030043136, 2080374785, 2097152000};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions.
+
+
+typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
+
+
+TEST_P(InstructionSelectorDPITest, Parameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorDPITest, Immediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
+ &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
+ &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(2)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(m.Parameter(0),
+ (m.*shift.constructor)(
+ m.Parameter(1), m.Int32Constant(imm))),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(5U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch((m.*dpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(5U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
+ const DPI dpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(
+ m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32Equal(
+ (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32Equal(
+ (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
+ const DPI dpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32NotEqual(
+ (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ MLabel a, b;
+ m.Branch(m.Word32NotEqual(
+ (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
+ ::testing::ValuesIn(kDPIs));
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions with overflow.
+
+
+typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(
+ m.Parameter(0),
+ (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+ m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(m.Parameter(0),
+ (m.*shift.constructor)(
+ m.Parameter(1), m.Int32Constant(imm)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ }
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 1, (m.*odpi.constructor)(
+ (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kOverflow, s[0]->flags_condition());
+ }
+ }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithParameters) {
+ const ODPI odpi = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(
+ m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
+ const ODPI odpi = GetParam();
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Projection(
+ 0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
// Value projection of <op> where one operand is a register-shifted register:
// the shift must be folded into the instruction's addressing mode (r_mode),
// in both operand orders.
TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
  const ODPI odpi = GetParam();
  TRACED_FOREACH(Shift, shift, kShifts) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    m.Return(m.Projection(
        0, (m.*odpi.constructor)(
               m.Parameter(0),
               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_LE(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  TRACED_FOREACH(Shift, shift, kShifts) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    // NOTE(review): this case reuses Parameter(0) both inside the shift and as
    // the other operand, unlike the first loop which uses three distinct
    // parameters — presumably intentional, but worth confirming upstream.
    m.Return(m.Projection(
        0, (m.*odpi.constructor)(
               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
               m.Parameter(0))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_LE(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}
+
+
// Value projection of <op> where one operand is shifted by an immediate: the
// shift amount becomes the third instruction input (i_mode), in both operand
// orders.
TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
  const ODPI odpi = GetParam();
  TRACED_FOREACH(Shift, shift, kShifts) {
    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
      m.Return(m.Projection(
          0, (m.*odpi.constructor)(m.Parameter(0),
                                   (m.*shift.constructor)(
                                       m.Parameter(1), m.Int32Constant(imm)))));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
      ASSERT_EQ(3U, s[0]->InputCount());
      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
      EXPECT_LE(1U, s[0]->OutputCount());
      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
    }
  }
  TRACED_FOREACH(Shift, shift, kShifts) {
    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
      m.Return(m.Projection(
          0, (m.*odpi.constructor)(
                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
                 m.Parameter(0))));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
      ASSERT_EQ(3U, s[0]->InputCount());
      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
      EXPECT_LE(1U, s[0]->OutputCount());
      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
    }
  }
}
+
+
// When BOTH projections (value and overflow) are used, a single instruction
// must produce two outputs with flags set to the overflow condition.
TEST_P(InstructionSelectorODPITest, BothWithParameters) {
  const ODPI odpi = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
  Stream s = m.Build();
  ASSERT_LE(1U, s.size());
  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
  EXPECT_EQ(2U, s[0]->InputCount());
  // Two outputs: the arithmetic result and the materialized overflow bit.
  EXPECT_EQ(2U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
  EXPECT_EQ(kOverflow, s[0]->flags_condition());
}


// Same as above but with an immediate on either side; the reversed order must
// use the reverse opcode.
TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
  const ODPI odpi = GetParam();
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
    Stream s = m.Build();
    ASSERT_LE(1U, s.size());
    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(2U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kOverflow, s[0]->flags_condition());
  }
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
    Stream s = m.Build();
    ASSERT_LE(1U, s.size());
    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(2U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kOverflow, s[0]->flags_condition());
  }
}
+
+
// Both projections used, with a register-shifted operand folded into the
// addressing mode; both operand orders.
TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
  const ODPI odpi = GetParam();
  TRACED_FOREACH(Shift, shift, kShifts) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    Node* n = (m.*odpi.constructor)(
        m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
    Stream s = m.Build();
    ASSERT_LE(1U, s.size());
    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(2U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kOverflow, s[0]->flags_condition());
  }
  TRACED_FOREACH(Shift, shift, kShifts) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    Node* n = (m.*odpi.constructor)(
        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
    Stream s = m.Build();
    ASSERT_LE(1U, s.size());
    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(2U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kOverflow, s[0]->flags_condition());
  }
}


// Both projections used, with an immediate-shifted operand; both operand
// orders.
TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
  const ODPI odpi = GetParam();
  TRACED_FOREACH(Shift, shift, kShifts) {
    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
      Node* n = (m.*odpi.constructor)(
          m.Parameter(0),
          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
      Stream s = m.Build();
      ASSERT_LE(1U, s.size());
      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
      ASSERT_EQ(3U, s[0]->InputCount());
      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
      EXPECT_EQ(2U, s[0]->OutputCount());
      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
      EXPECT_EQ(kOverflow, s[0]->flags_condition());
    }
  }
  TRACED_FOREACH(Shift, shift, kShifts) {
    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
      Node* n = (m.*odpi.constructor)(
          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
          m.Parameter(1));
      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
      Stream s = m.Build();
      ASSERT_LE(1U, s.size());
      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
      ASSERT_EQ(3U, s[0]->InputCount());
      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
      EXPECT_EQ(2U, s[0]->OutputCount());
      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
      EXPECT_EQ(kOverflow, s[0]->flags_condition());
    }
  }
}
+
+
// Branching directly on the overflow projection must fuse the branch into the
// arithmetic instruction (kFlags_branch): 4 inputs = 2 operands + 2 branch
// targets, and only the value output remains.
TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
  const ODPI odpi = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  MLabel a, b;
  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
  m.Branch(m.Projection(1, n), &a, &b);
  m.Bind(&a);
  m.Return(m.Int32Constant(0));
  m.Bind(&b);
  m.Return(m.Projection(0, n));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
  EXPECT_EQ(4U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
  EXPECT_EQ(kOverflow, s[0]->flags_condition());
}


// Branch fused with overflow check, immediate on either side.
TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
  const ODPI odpi = GetParam();
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    MLabel a, b;
    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
    m.Branch(m.Projection(1, n), &a, &b);
    m.Bind(&a);
    m.Return(m.Int32Constant(0));
    m.Bind(&b);
    m.Return(m.Projection(0, n));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
    ASSERT_EQ(4U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
    EXPECT_EQ(kOverflow, s[0]->flags_condition());
  }
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    MLabel a, b;
    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
    m.Branch(m.Projection(1, n), &a, &b);
    m.Bind(&a);
    m.Return(m.Int32Constant(0));
    m.Bind(&b);
    m.Return(m.Projection(0, n));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
    ASSERT_EQ(4U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
    EXPECT_EQ(kOverflow, s[0]->flags_condition());
  }
}
+
+
// Branching on (overflow == 0) must fuse as the negated condition
// (kNotOverflow) rather than emitting a separate compare.
TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
  const ODPI odpi = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  MLabel a, b;
  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
  m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
  m.Bind(&a);
  m.Return(m.Projection(0, n));
  m.Bind(&b);
  m.Return(m.Int32Constant(0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
  EXPECT_EQ(4U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
  EXPECT_EQ(kNotOverflow, s[0]->flags_condition());
}


// Branching on (overflow != 0) keeps the positive kOverflow condition.
TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
  const ODPI odpi = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  MLabel a, b;
  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
  m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
  m.Bind(&a);
  m.Return(m.Projection(0, n));
  m.Bind(&b);
  m.Return(m.Int32Constant(0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
  EXPECT_EQ(4U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
  EXPECT_EQ(kOverflow, s[0]->flags_condition());
}


// Instantiate the ODPI suite for every overflow-checked data-processing
// instruction in kODPIs.
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
                        ::testing::ValuesIn(kODPIs));
+
+
+// -----------------------------------------------------------------------------
+// Shifts.
+
+
// Parameterized suite over the shift operations described by kShifts.
typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;


// A bare shift lowers to a mov with the shift folded into the addressing mode.
TEST_P(InstructionSelectorShiftTest, Parameters) {
  const Shift shift = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


// Shift by an in-range immediate uses the immediate addressing mode.
TEST_P(InstructionSelectorShiftTest, Immediate) {
  const Shift shift = GetParam();
  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}
+
+
// Word32Equal against a shifted operand must fold the shift into a single cmp
// (flags-setting), regardless of which side the shift is on.
TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
  const Shift shift = GetParam();
  {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    m.Return(
        m.Word32Equal(m.Parameter(0),
                      (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kEqual, s[0]->flags_condition());
  }
  {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    m.Return(
        m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
                      m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kEqual, s[0]->flags_condition());
  }
}


// Same folding with an immediate shift amount.
TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
  const Shift shift = GetParam();
  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
    m.Return(m.Word32Equal(
        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
        m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kEqual, s[0]->flags_condition());
  }
  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
    m.Return(m.Word32Equal(
        m.Parameter(0),
        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kEqual, s[0]->flags_condition());
  }
}
+
+
// Comparing a shift result against zero should use a flags-setting movs
// (kArmMov with kFlags_set, two outputs) instead of a separate cmp.
TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
  const Shift shift = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(
      m.Word32Equal(m.Int32Constant(0),
                    (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(2U, s[0]->OutputCount());
  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
  EXPECT_EQ(kEqual, s[0]->flags_condition());
}


// Same zero-compare folding with an immediate shift amount.
TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
  const Shift shift = GetParam();
  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
    m.Return(m.Word32Equal(
        m.Int32Constant(0),
        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(2U, s[0]->OutputCount());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(kEqual, s[0]->flags_condition());
  }
}


// Word32Not of a shift folds into a single mvn.
TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
  const Shift shift = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


// mvn with an immediate-shifted operand.
TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
  const Shift shift = GetParam();
  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    m.Return(m.Word32Not(
        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}
+
+
// x & ~(y shift z) folds to a single bic with the shift in the addressing
// mode.
TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
  const Shift shift = GetParam();
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
                                           m.Parameter(1), m.Parameter(2)))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmBic, s[0]->arch_opcode());
  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
  EXPECT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


// Same bic folding with an immediate shift amount.
TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
  const Shift shift = GetParam();
  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
    m.Return(m.Word32And(m.Parameter(0),
                         m.Word32Not((m.*shift.constructor)(
                             m.Parameter(1), m.Int32Constant(imm)))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}


INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
                        ::testing::ValuesIn(kShifts));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
// Describes one memory-access flavour: the machine type, the expected
// load/store opcodes, a predicate classifying the value register
// (integer vs. double), and a sample of valid immediate offsets.
struct MemoryAccess {
  MachineType type;
  ArchOpcode ldr_opcode;
  ArchOpcode str_opcode;
  // Member-function pointer used to check the kind of the loaded value.
  bool (InstructionSelectorTest::Stream::*val_predicate)(
      const InstructionOperand*) const;
  const int32_t immediates[40];
};
+
+
// Pretty-printer for gtest parameter names: prints just the machine type,
// going through v8's OStringStream since MachineType streams to an OStream.
std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
  OStringStream ost;
  ost << memacc.type;
  return os << ost.c_str();
}
+
+
// One entry per machine type. The immediate lists sample the encodable offset
// range of the corresponding ARM instruction (e.g. ±4095 for ldr/str word and
// byte forms, ±255 for ldrh/strh, ±1020 in multiples of 4 for vldr/vstr).
static const MemoryAccess kMemoryAccesses[] = {
    {kMachInt8,
     kArmLdrsb,
     kArmStrb,
     &InstructionSelectorTest::Stream::IsInteger,
     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
    {kMachUint8,
     kArmLdrb,
     kArmStrb,
     &InstructionSelectorTest::Stream::IsInteger,
     {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
      -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
      39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
    {kMachInt16,
     kArmLdrsh,
     kArmStrh,
     &InstructionSelectorTest::Stream::IsInteger,
     {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
      -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
      102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
    {kMachUint16,
     kArmLdrh,
     kArmStrh,
     &InstructionSelectorTest::Stream::IsInteger,
     {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
      -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
      114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
    {kMachInt32,
     kArmLdr,
     kArmStr,
     &InstructionSelectorTest::Stream::IsInteger,
     {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
      -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
      93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
    {kMachFloat32,
     kArmVldrF32,
     kArmVstrF32,
     &InstructionSelectorTest::Stream::IsDouble,
     {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
      -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
      24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
    {kMachFloat64,
     kArmVldrF64,
     kArmVstrF64,
     &InstructionSelectorTest::Stream::IsDouble,
     {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
      -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
      108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
+
+} // namespace
+
+
// Parameterized suite over the memory-access flavours in kMemoryAccesses.
typedef InstructionSelectorTestWithParam<MemoryAccess>
    InstructionSelectorMemoryAccessTest;
+
+
// Load with register index selects the type-specific ldr opcode in
// register-register offset mode; the result register must match the value
// kind (integer vs. double).
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
  EXPECT_EQ(2U, s[0]->InputCount());
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
}


// Load with an encodable constant index uses register-immediate offset mode.
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, index, memacc.immediates) {
    StreamBuilder m(this, memacc.type, kMachPtr);
    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
    ASSERT_EQ(2U, s[0]->InputCount());
    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
  }
}


// Store with register index: type-specific str opcode, no outputs.
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
  m.Return(m.Int32Constant(0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
  EXPECT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(0U, s[0]->OutputCount());
}


// Store with an encodable constant index uses register-immediate offset mode.
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, index, memacc.immediates) {
    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
            m.Parameter(1));
    m.Return(m.Int32Constant(0));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
    EXPECT_EQ(0U, s[0]->OutputCount());
  }
}


INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
                        InstructionSelectorMemoryAccessTest,
                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
// float32 -> float64 widening selects a single vcvt.f64.f32.
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
  StreamBuilder m(this, kMachFloat64, kMachFloat32);
  m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmVcvtF64F32, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


// float64 -> float32 truncation selects a single vcvt.f32.f64.
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
  StreamBuilder m(this, kMachFloat32, kMachFloat64);
  m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmVcvtF32F64, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
// add combined with mul fuses into a single mla (multiply-accumulate),
// whichever side the multiplication is on.
TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
  {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    m.Return(
        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  {
    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
    m.Return(
        m.Int32Add(m.Int32Mul(m.Parameter(1), m.Parameter(2)), m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
    EXPECT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}
+
+
// Without hardware division, Int32Div lowers to a 4-instruction VFP sequence:
// convert both operands to double, vdiv, convert back to int32.
TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(4U, s.size());
  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
  ASSERT_EQ(1U, s[1]->OutputCount());
  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
  ASSERT_EQ(2U, s[2]->InputCount());
  ASSERT_EQ(1U, s[2]->OutputCount());
  // The vdiv consumes the two converted operands, in order.
  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
  ASSERT_EQ(1U, s[3]->InputCount());
  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
}


// With the SUDIV feature, Int32Div is a single sdiv.
TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build(SUDIV);
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
}


// Without hardware division, Int32Mod is the VFP division sequence followed by
// mul and sub: a - (a / b) * b.
TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(6U, s.size());
  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
  ASSERT_EQ(1U, s[1]->OutputCount());
  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
  ASSERT_EQ(2U, s[2]->InputCount());
  ASSERT_EQ(1U, s[2]->OutputCount());
  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
  ASSERT_EQ(1U, s[3]->InputCount());
  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
  // quotient * divisor ...
  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
  ASSERT_EQ(1U, s[4]->OutputCount());
  ASSERT_EQ(2U, s[4]->InputCount());
  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
  // ... subtracted from the dividend.
  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
  ASSERT_EQ(1U, s[5]->OutputCount());
  ASSERT_EQ(2U, s[5]->InputCount());
  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
}


// With SUDIV (but not MLS), Int32Mod is sdiv + mul + sub.
TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build(SUDIV);
  ASSERT_EQ(3U, s.size());
  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->OutputCount());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
  ASSERT_EQ(1U, s[1]->OutputCount());
  ASSERT_EQ(2U, s[1]->InputCount());
  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
  ASSERT_EQ(1U, s[2]->OutputCount());
  ASSERT_EQ(2U, s[2]->InputCount());
  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
}


// With both SUDIV and MLS, Int32Mod is sdiv + mls (multiply-and-subtract).
TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build(MLS, SUDIV);
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->OutputCount());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
  ASSERT_EQ(1U, s[1]->OutputCount());
  ASSERT_EQ(3U, s[1]->InputCount());
  // mls operands: quotient, divisor, dividend.
  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
}
+
+
// Plain multiplication of two registers selects a single mul.
TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


// Multiplication by 2^k +/- 1 is strength-reduced to add/rsb with an LSL-by-k
// operand, in either operand order.
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
  // x * (2^k + 1) -> x + (x << k)
  TRACED_FORRANGE(int32_t, k, 1, 30) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  // x * (2^k - 1) -> -x + (x << k)
  TRACED_FORRANGE(int32_t, k, 3, 30) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  // (2^k + 1) * x -> x + (x << k)
  TRACED_FORRANGE(int32_t, k, 1, 30) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  // (2^k - 1) * x -> -x + (x << k)
  TRACED_FORRANGE(int32_t, k, 3, 30) {
    StreamBuilder m(this, kMachInt32, kMachInt32);
    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
    ASSERT_EQ(3U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}
+
+
// Without MLS, sub-of-mul stays two instructions: mul feeding sub.
TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
  m.Return(
      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(kArmSub, s[1]->arch_opcode());
  ASSERT_EQ(2U, s[1]->InputCount());
  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
}


// With MLS available, the same pattern fuses into a single mls.
TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
  m.Return(
      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
  Stream s = m.Build(MLS);
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArmMls, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(3U, s[0]->InputCount());
}
+
+
+TEST_F(InstructionSelectorTest, Int32UDivWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+ EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s[3]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UDivWithParametersForSUDIV) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(SUDIV);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(6U, s.size());
+ EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+ EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s[3]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+ EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+ ASSERT_EQ(1U, s[4]->OutputCount());
+ ASSERT_EQ(2U, s[4]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+ EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+ ASSERT_EQ(1U, s[5]->OutputCount());
+ ASSERT_EQ(2U, s[5]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIV) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(SUDIV);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIVAndMLS) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build(MLS, SUDIV);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
+ TRACED_FORRANGE(int32_t, width, 1, 32) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Parameter(0),
+ m.Int32Constant(0xffffffffu >> (32 - width))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, width, 1, 32) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+ m.Parameter(0)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(
+ m.Parameter(0),
+ m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(
+ m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
+ m.Parameter(0)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t max = 1 << lsb;
+ if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+ uint32_t jnk = rng()->NextInt(max);
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t max = 1 << lsb;
+ if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+ uint32_t jnk = rng()->NextInt(max);
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ if (imm == 0) continue;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Not(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+ m.Int32Constant(0xffffffffu >> (32 - width))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 029d6e3b96..a3ba767eb8 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler-intrinsics.h"
namespace v8 {
namespace internal {
namespace compiler {
// Adds Arm-specific methods for generating InstructionOperands.
-class ArmOperandGenerator V8_FINAL : public OperandGenerator {
+class ArmOperandGenerator : public OperandGenerator {
public:
explicit ArmOperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
@@ -24,15 +24,9 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
}
bool CanBeImmediate(Node* node, InstructionCode opcode) {
- int32_t value;
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- value = ValueOf<int32_t>(node->op());
- break;
- default:
- return false;
- }
+ Int32Matcher m(node);
+ if (!m.HasValue()) return false;
+ int32_t value = m.Value();
switch (ArchOpcodeField::decode(opcode)) {
case kArmAnd:
case kArmMov:
@@ -55,25 +49,31 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
case kArmRsb:
return ImmediateFitsAddrMode1Instruction(value);
- case kArmFloat64Load:
- case kArmFloat64Store:
+ case kArmVldrF32:
+ case kArmVstrF32:
+ case kArmVldrF64:
+ case kArmVstrF64:
return value >= -1020 && value <= 1020 && (value % 4) == 0;
- case kArmLoadWord8:
- case kArmStoreWord8:
- case kArmLoadWord32:
- case kArmStoreWord32:
+ case kArmLdrb:
+ case kArmLdrsb:
+ case kArmStrb:
+ case kArmLdr:
+ case kArmStr:
case kArmStoreWriteBarrier:
return value >= -4095 && value <= 4095;
- case kArmLoadWord16:
- case kArmStoreWord16:
+ case kArmLdrh:
+ case kArmLdrsh:
+ case kArmStrh:
return value >= -255 && value <= 255;
+ case kArchCallCodeObject:
+ case kArchCallJSFunction:
case kArchJmp:
case kArchNop:
case kArchRet:
- case kArchDeoptimize:
+ case kArchTruncateDoubleToI:
case kArmMul:
case kArmMla:
case kArmMls:
@@ -81,11 +81,6 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
case kArmUdiv:
case kArmBfc:
case kArmUbfx:
- case kArmCallCodeObject:
- case kArmCallJSFunction:
- case kArmCallAddress:
- case kArmPush:
- case kArmDrop:
case kArmVcmpF64:
case kArmVaddF64:
case kArmVsubF64:
@@ -95,10 +90,14 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
case kArmVdivF64:
case kArmVmodF64:
case kArmVnegF64:
+ case kArmVsqrtF64:
+ case kArmVcvtF32F64:
+ case kArmVcvtF64F32:
case kArmVcvtF64S32:
case kArmVcvtF64U32:
case kArmVcvtS32F64:
case kArmVcvtU32F64:
+ case kArmPush:
return false;
}
UNREACHABLE();
@@ -115,9 +114,9 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsDoubleRegister(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
}
@@ -126,37 +125,17 @@ static bool TryMatchROR(InstructionSelector* selector,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
ArmOperandGenerator g(selector);
- if (node->opcode() != IrOpcode::kWord32Or) return false;
+ if (node->opcode() != IrOpcode::kWord32Ror) return false;
Int32BinopMatcher m(node);
- Node* shl = m.left().node();
- Node* shr = m.right().node();
- if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
- std::swap(shl, shr);
- } else if (!m.left().IsWord32Shl() || !m.right().IsWord32Shr()) {
- return false;
- }
- Int32BinopMatcher mshr(shr);
- Int32BinopMatcher mshl(shl);
- Node* value = mshr.left().node();
- if (value != mshl.left().node()) return false;
- Node* shift = mshr.right().node();
- Int32Matcher mshift(shift);
- if (mshift.IsInRange(1, 31) && mshl.right().Is(32 - mshift.Value())) {
+ *value_return = g.UseRegister(m.left().node());
+ if (m.right().IsInRange(1, 31)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
- *value_return = g.UseRegister(value);
- *shift_return = g.UseImmediate(shift);
- return true;
- }
- if (mshl.right().IsInt32Sub()) {
- Int32BinopMatcher mshlright(mshl.right().node());
- if (!mshlright.left().Is(32)) return false;
- if (mshlright.right().node() != shift) return false;
+ *shift_return = g.UseImmediate(m.right().node());
+ } else {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
- *value_return = g.UseRegister(value);
- *shift_return = g.UseRegister(shift);
- return true;
+ *shift_return = g.UseRegister(m.right().node());
}
- return false;
+ return true;
}
@@ -287,8 +266,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_NE(0, input_count);
DCHECK_NE(0, output_count);
- DCHECK_GE(ARRAY_SIZE(inputs), input_count);
- DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
@@ -305,29 +284,30 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = OpParameter<MachineType>(node);
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* result = rep == kMachineFloat64
- ? g.DefineAsDoubleRegister(node)
- : g.DefineAsRegister(node);
-
ArchOpcode opcode;
switch (rep) {
- case kMachineFloat64:
- opcode = kArmFloat64Load;
+ case kRepFloat32:
+ opcode = kArmVldrF32;
+ break;
+ case kRepFloat64:
+ opcode = kArmVldrF64;
break;
- case kMachineWord8:
- opcode = kArmLoadWord8;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
break;
- case kMachineWord16:
- opcode = kArmLoadWord16;
+ case kRepWord16:
+ opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord32:
- opcode = kArmLoadWord32;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kArmLdr;
break;
default:
UNREACHABLE();
@@ -335,14 +315,11 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
- g.UseRegister(base), g.UseImmediate(index));
- } else if (g.CanBeImmediate(base, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
- g.UseRegister(index), g.UseImmediate(base));
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
- g.UseRegister(base), g.UseRegister(index));
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
}
@@ -354,36 +331,38 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = store_rep.rep;
- if (store_rep.write_barrier_kind == kFullWriteBarrier) {
- DCHECK(rep == kMachineTagged);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
- g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps),
+ g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
temps);
return;
}
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
- InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value)
- : g.UseRegister(value);
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
ArchOpcode opcode;
switch (rep) {
- case kMachineFloat64:
- opcode = kArmFloat64Store;
+ case kRepFloat32:
+ opcode = kArmVstrF32;
+ break;
+ case kRepFloat64:
+ opcode = kArmVstrF64;
break;
- case kMachineWord8:
- opcode = kArmStoreWord8;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kArmStrb;
break;
- case kMachineWord16:
- opcode = kArmStoreWord16;
+ case kRepWord16:
+ opcode = kArmStrh;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord32:
- opcode = kArmStoreWord32;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kArmStr;
break;
default:
UNREACHABLE();
@@ -392,13 +371,10 @@ void InstructionSelector::VisitStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
- g.UseRegister(base), g.UseImmediate(index), val);
- } else if (g.CanBeImmediate(base, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
- g.UseRegister(index), g.UseImmediate(base), val);
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
- g.UseRegister(base), g.UseRegister(index), val);
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
}
}
@@ -439,10 +415,10 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
if (IsSupported(ARMv7) && m.right().HasValue()) {
uint32_t value = m.right().Value();
- uint32_t width = CompilerIntrinsics::CountSetBits(value);
- uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+ uint32_t width = base::bits::CountPopulation32(value);
+ uint32_t msb = base::bits::CountLeadingZeros32(value);
if (width != 0 && msb + width == 32) {
- DCHECK_EQ(0, CompilerIntrinsics::CountTrailingZeros(value));
+ DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
@@ -458,8 +434,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
// Try to interpret this AND as BFC.
width = 32 - width;
- msb = CompilerIntrinsics::CountLeadingZeros(~value);
- uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value);
+ msb = base::bits::CountLeadingZeros32(~value);
+ uint32_t lsb = base::bits::CountTrailingZeros32(~value);
if (msb + width + lsb == 32) {
Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.TempImmediate(lsb), g.TempImmediate(width));
@@ -471,14 +447,6 @@ void InstructionSelector::VisitWord32And(Node* node) {
void InstructionSelector::VisitWord32Or(Node* node) {
- ArmOperandGenerator g(this);
- InstructionCode opcode = kArmMov;
- InstructionOperand* value_operand;
- InstructionOperand* shift_operand;
- if (TryMatchROR(this, &opcode, node, &value_operand, &shift_operand)) {
- Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
- return;
- }
VisitBinop(this, node, kArmOrr, kArmOrr);
}
@@ -505,15 +473,44 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
template <typename TryMatchShift>
static inline void VisitShift(InstructionSelector* selector, Node* node,
- TryMatchShift try_match_shift) {
+ TryMatchShift try_match_shift,
+ FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmMov;
- InstructionOperand* value_operand = NULL;
- InstructionOperand* shift_operand = NULL;
- CHECK(
- try_match_shift(selector, &opcode, node, &value_operand, &shift_operand));
- selector->Emit(opcode, g.DefineAsRegister(node), value_operand,
- shift_operand);
+ InstructionOperand* inputs[4];
+ size_t input_count = 2;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+ DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+ TryMatchShift try_match_shift) {
+ FlagsContinuation cont;
+ VisitShift(selector, node, try_match_shift, &cont);
}
@@ -531,10 +528,10 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t value = (mleft.right().Value() >> lsb) << lsb;
- uint32_t width = CompilerIntrinsics::CountSetBits(value);
- uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+ uint32_t width = base::bits::CountPopulation32(value);
+ uint32_t msb = base::bits::CountLeadingZeros32(value);
if (msb + width + lsb == 32) {
- DCHECK_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value));
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
Emit(kArmUbfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
g.TempImmediate(width));
@@ -551,6 +548,11 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitShift(this, node, TryMatchROR);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -589,14 +591,14 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
int32_t value = m.right().Value();
- if (IsPowerOf2(value - 1)) {
+ if (base::bits::IsPowerOfTwo32(value - 1)) {
Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (value < kMaxInt && IsPowerOf2(value + 1)) {
+ if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
@@ -683,16 +685,23 @@ void InstructionSelector::VisitInt32UMod(Node* node) {
}
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
+ Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArmVcvtF64U32, g.DefineAsDoubleRegister(node),
+ Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
@@ -700,14 +709,21 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
- g.UseDoubleRegister(node->InputAt(0)));
+ g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
- g.UseDoubleRegister(node->InputAt(0)));
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
}
@@ -750,8 +766,7 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.right().Is(-1.0)) {
- Emit(kArmVnegF64, g.DefineAsRegister(node),
- g.UseDoubleRegister(m.left().node()));
+ Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
VisitRRRFloat64(this, kArmVmulF64, node);
}
@@ -765,9 +780,14 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0),
- g.UseFixedDouble(node->InputAt(0), d0),
- g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+ Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
@@ -775,58 +795,54 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {
ArmOperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(call, &buffer, true, false, continuation,
- deoptimization);
+ InitializeCallBuffer(call, &buffer, true, false);
// TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
- for (int i = buffer.pushed_count - 1; i >= 0; --i) {
- Node* input = buffer.pushed_nodes[i];
- Emit(kArmPush, NULL, g.UseRegister(input));
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
+ Emit(kArmPush, NULL, g.UseRegister(*input));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kArmCallAddress;
- break;
case CallDescriptor::kCallJSFunction:
- opcode = kArmCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_count > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
- }
}
@@ -877,8 +893,8 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
}
DCHECK_NE(0, input_count);
- DCHECK_GE(ARRAY_SIZE(inputs), input_count);
- DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
@@ -898,6 +914,14 @@ void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
case IrOpcode::kWord32Xor:
return VisitWordCompare(this, node, kArmTeq, cont, true);
+ case IrOpcode::kWord32Sar:
+ return VisitShift(this, node, TryMatchASR, cont);
+ case IrOpcode::kWord32Shl:
+ return VisitShift(this, node, TryMatchLSL, cont);
+ case IrOpcode::kWord32Shr:
+ return VisitShift(this, node, TryMatchLSR, cont);
+ case IrOpcode::kWord32Ror:
+ return VisitShift(this, node, TryMatchROR, cont);
default:
break;
}
@@ -927,14 +951,13 @@ void InstructionSelector::VisitFloat64Compare(Node* node,
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (cont->IsBranch()) {
- Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()),
- g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()),
+ Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
- g.UseDoubleRegister(m.left().node()),
- g.UseDoubleRegister(m.right().node()));
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
}
}
diff --git a/deps/v8/src/compiler/arm/linkage-arm.cc b/deps/v8/src/compiler/arm/linkage-arm.cc
index 3b5d5f7d0f..6673a475bd 100644
--- a/deps/v8/src/compiler/arm/linkage-arm.cc
+++ b/deps/v8/src/compiler/arm/linkage-arm.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-struct LinkageHelperTraits {
+struct ArmLinkageHelperTraits {
static Register ReturnValueReg() { return r0; }
static Register ReturnValue2Reg() { return r1; }
static Register JSCallFunctionReg() { return r1; }
@@ -33,35 +33,34 @@ struct LinkageHelperTraits {
};
+typedef LinkageHelper<ArmLinkageHelperTraits> LH;
+
CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
- return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
- zone, parameter_count);
+ return LH::GetJSCallDescriptor(zone, parameter_count);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
- zone, function, parameter_count, properties, can_deoptimize);
+ Operator::Properties properties, Zone* zone) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
- zone, descriptor, stack_parameter_count, can_deoptimize);
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone) {
+ return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+ flags);
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types) {
- return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
- zone, num_params, return_type, param_types);
-}
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 065889cfbb..a56de204b7 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -19,7 +19,7 @@ namespace compiler {
// Adds Arm64-specific methods to convert InstructionOperands.
-class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
+class Arm64OperandConverter FINAL : public InstructionOperandConverter {
public:
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
@@ -89,6 +89,9 @@ class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
return Operand(constant.ToInt32());
case Constant::kInt64:
return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
@@ -131,6 +134,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
switch (ArchOpcodeField::decode(opcode)) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ Register target = i.InputRegister(0);
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, temp);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+ AddSafepointAndDeopt(instr);
+ break;
+ }
case kArchJmp:
__ B(code_->GetLabel(i.InputBlock(0)));
break;
@@ -140,15 +172,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchRet:
AssembleReturn();
break;
- case kArchDeoptimize: {
- int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
-
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
- __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- }
case kArm64Add:
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -166,12 +192,40 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64And32:
__ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
break;
+ case kArm64Bic:
+ __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Bic32:
+ __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
case kArm64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
case kArm64Mul32:
__ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
+ case kArm64Madd:
+ __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2));
+ break;
+ case kArm64Madd32:
+ __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
+ i.InputRegister32(2));
+ break;
+ case kArm64Msub:
+ __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2));
+ break;
+ case kArm64Msub32:
+ __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
+ i.InputRegister32(2));
+ break;
+ case kArm64Mneg:
+ __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Mneg32:
+ __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
case kArm64Idiv:
__ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
break;
@@ -233,12 +287,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Or32:
__ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
break;
- case kArm64Xor:
+ case kArm64Orn:
+ __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Orn32:
+ __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Eor:
__ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kArm64Xor32:
+ case kArm64Eor32:
__ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
break;
+ case kArm64Eon:
+ __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Eon32:
+ __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
case kArm64Sub:
__ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -268,46 +334,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Sar32:
ASSEMBLE_SHIFT(Asr, 32);
break;
- case kArm64CallCodeObject: {
- if (instr->InputAt(0)->IsImmediate()) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ Call(code, RelocInfo::CODE_TARGET);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- } else {
- Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ Ldr(reg, MemOperand(reg, entry));
- __ Call(reg);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- }
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
- // Meaningless instruction for ICs to overwrite.
- AddNopForSmiCodeInlining();
+ case kArm64Ror:
+ ASSEMBLE_SHIFT(Ror, 64);
break;
- }
- case kArm64CallJSFunction: {
- Register func = i.InputRegister(0);
-
- // TODO(jarin) The load of the context should be separated from the call.
- __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
- __ Call(x10);
-
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
+ case kArm64Ror32:
+ ASSEMBLE_SHIFT(Ror, 32);
break;
- }
- case kArm64CallAddress: {
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm(), i.InputRegister(0));
+ case kArm64Mov32:
+ __ Mov(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ case kArm64Sxtw:
+ __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
- }
case kArm64Claim: {
int words = MiscField::decode(instr->opcode());
__ Claim(words);
@@ -330,17 +368,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
break;
}
- case kArm64Drop: {
- int words = MiscField::decode(instr->opcode());
- __ Drop(words);
- break;
- }
case kArm64Cmp:
__ Cmp(i.InputRegister(0), i.InputOperand(1));
break;
case kArm64Cmp32:
__ Cmp(i.InputRegister32(0), i.InputOperand32(1));
break;
+ case kArm64Cmn:
+ __ Cmn(i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Cmn32:
+ __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
+ break;
case kArm64Tst:
__ Tst(i.InputRegister(0), i.InputOperand(1));
break;
@@ -377,13 +416,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
0, 2);
break;
}
- case kArm64Int32ToInt64:
- __ Sxtw(i.OutputRegister(), i.InputRegister(0));
+ case kArm64Float64Sqrt:
+ __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Int64ToInt32:
- if (!i.OutputRegister().is(i.InputRegister(0))) {
- __ Mov(i.OutputRegister(), i.InputRegister(0));
- }
+ case kArm64Float32ToFloat64:
+ __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
+ break;
+ case kArm64Float64ToFloat32:
+ __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
break;
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
@@ -397,34 +437,46 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
- case kArm64LoadWord8:
+ case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
- case kArm64StoreWord8:
+ case kArm64Ldrsb:
+ __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64Strb:
__ Strb(i.InputRegister(2), i.MemoryOperand());
break;
- case kArm64LoadWord16:
+ case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
break;
- case kArm64StoreWord16:
+ case kArm64Ldrsh:
+ __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64Strh:
__ Strh(i.InputRegister(2), i.MemoryOperand());
break;
- case kArm64LoadWord32:
+ case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
break;
- case kArm64StoreWord32:
+ case kArm64StrW:
__ Str(i.InputRegister32(2), i.MemoryOperand());
break;
- case kArm64LoadWord64:
+ case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
break;
- case kArm64StoreWord64:
+ case kArm64Str:
__ Str(i.InputRegister(2), i.MemoryOperand());
break;
- case kArm64Float64Load:
+ case kArm64LdrS:
+ __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
+ break;
+ case kArm64StrS:
+ __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
+ break;
+ case kArm64LdrD:
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
- case kArm64Float64Store:
+ case kArm64StrD:
__ Str(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kArm64StoreWriteBarrier: {
@@ -615,6 +667,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
// TODO(dcarney): increase stack slots in frame once before first use.
static int AlignedStackSlots(int stack_slots) {
if (stack_slots & 1) stack_slots++;
@@ -690,8 +749,9 @@ void CodeGenerator::AssembleReturn() {
} else {
__ Mov(jssp, fp);
__ Pop(fp, lr);
- int pop_count =
- descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
__ Drop(pop_count);
__ Ret();
}
@@ -723,12 +783,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Str(temp, g.ToMemOperand(destination, masm()));
}
} else if (source->IsConstant()) {
- ConstantOperand* constant_source = ConstantOperand::cast(source);
+ Constant src = g.ToConstant(ConstantOperand::cast(source));
if (destination->IsRegister() || destination->IsStackSlot()) {
UseScratchRegisterScope scope(masm());
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: scope.AcquireX();
- Constant src = g.ToConstant(source);
if (src.type() == Constant::kHeapObject) {
__ LoadObject(dst, src.ToHeapObject());
} else {
@@ -737,15 +796,29 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsStackSlot()) {
__ Str(dst, g.ToMemOperand(destination, masm()));
}
- } else if (destination->IsDoubleRegister()) {
- FPRegister result = g.ToDoubleRegister(destination);
- __ Fmov(result, g.ToDouble(constant_source));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination).S();
+ __ Fmov(dst, src.ToFloat32());
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireS();
+ __ Fmov(temp, src.ToFloat32());
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
} else {
- DCHECK(destination->IsDoubleStackSlot());
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Fmov(temp, g.ToDouble(constant_source));
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(dst, src.ToFloat64());
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64());
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
}
} else if (source->IsDoubleRegister()) {
FPRegister src = g.ToDoubleRegister(source);
@@ -813,7 +886,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(temp, src);
__ Fmov(src, dst);
- __ Fmov(src, temp);
+ __ Fmov(dst, temp);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand dst = g.ToMemOperand(destination, masm());
@@ -830,24 +903,30 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
-#undef __
-#if DEBUG
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
-// Checks whether the code between start_pc and end_pc is a no-op.
-bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc) {
- if (start_pc + 4 != end_pc) {
- return false;
- }
- Address instr_address = code->instruction_start() + start_pc;
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
- v8::internal::Instruction* instr =
- reinterpret_cast<v8::internal::Instruction*>(instr_address);
- return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
}
-#endif // DEBUG
+#undef __
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 2d71c02ef0..b8484b7d5c 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -16,18 +16,32 @@ namespace compiler {
V(Arm64Add32) \
V(Arm64And) \
V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
V(Arm64Or32) \
- V(Arm64Xor) \
- V(Arm64Xor32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
V(Arm64Idiv) \
V(Arm64Idiv32) \
V(Arm64Udiv) \
@@ -46,36 +60,41 @@ namespace compiler {
V(Arm64Shr32) \
V(Arm64Sar) \
V(Arm64Sar32) \
- V(Arm64CallCodeObject) \
- V(Arm64CallJSFunction) \
- V(Arm64CallAddress) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtw) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePairZero) \
V(Arm64PokePair) \
- V(Arm64Drop) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
- V(Arm64Int32ToInt64) \
- V(Arm64Int64ToInt32) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
V(Arm64Uint32ToFloat64) \
- V(Arm64Float64Load) \
- V(Arm64Float64Store) \
- V(Arm64LoadWord8) \
- V(Arm64StoreWord8) \
- V(Arm64LoadWord16) \
- V(Arm64StoreWord16) \
- V(Arm64LoadWord32) \
- V(Arm64StoreWord32) \
- V(Arm64LoadWord64) \
- V(Arm64StoreWord64) \
+ V(Arm64LdrS) \
+ V(Arm64StrS) \
+ V(Arm64LdrD) \
+ V(Arm64StrD) \
+ V(Arm64Ldrb) \
+ V(Arm64Ldrsb) \
+ V(Arm64Strb) \
+ V(Arm64Ldrh) \
+ V(Arm64Ldrsh) \
+ V(Arm64Strh) \
+ V(Arm64LdrW) \
+ V(Arm64StrW) \
+ V(Arm64Ldr) \
+ V(Arm64Str) \
V(Arm64StoreWriteBarrier)
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64-unittest.cc
new file mode 100644
index 0000000000..bd1471156e
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -0,0 +1,1397 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+ return os << mi.constructor_name;
+}
+
+
+// Helper to build Int32Constant or Int64Constant depending on the given
+// machine type.
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
+ int64_t value) {
+ switch (type) {
+ case kMachInt32:
+ return m.Int32Constant(value);
+ break;
+
+ case kMachInt64:
+ return m.Int64Constant(value);
+ break;
+
+ default:
+ UNIMPLEMENTED();
+ }
+ return NULL;
+}
+
+
+// ARM64 logical instructions.
+static const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32, kMachInt32},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eor, kMachInt64}};
+
+
+// ARM64 logical immediates: contiguous set bits, rotated about a power of two
+// sized block. The block is then duplicated across the word. Below is a random
+// subset of the 32-bit immediates.
+static const uint32_t kLogicalImmediates[] = {
+ 0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
+ 0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
+ 0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
+ 0x001ffffc, 0x003f0000, 0x003f8000, 0x00780000, 0x007fc000, 0x00ff0000,
+ 0x01800000, 0x01800180, 0x01f801f8, 0x03fe0000, 0x03ffffc0, 0x03fffffc,
+ 0x06000000, 0x07fc0000, 0x07ffc000, 0x07ffffc0, 0x07ffffe0, 0x0ffe0ffe,
+ 0x0ffff800, 0x0ffffff0, 0x0fffffff, 0x18001800, 0x1f001f00, 0x1f801f80,
+ 0x30303030, 0x3ff03ff0, 0x3ff83ff8, 0x3fff0000, 0x3fff8000, 0x3fffffc0,
+ 0x70007000, 0x7f7f7f7f, 0x7fc00000, 0x7fffffc0, 0x8000001f, 0x800001ff,
+ 0x81818181, 0x9fff9fff, 0xc00007ff, 0xc0ffffff, 0xdddddddd, 0xe00001ff,
+ 0xe00003ff, 0xe007ffff, 0xefffefff, 0xf000003f, 0xf001f001, 0xf3fff3ff,
+ 0xf800001f, 0xf80fffff, 0xf87ff87f, 0xfbfbfbfb, 0xfc00001f, 0xfc0000ff,
+ 0xfc0001ff, 0xfc03fc03, 0xfe0001ff, 0xff000001, 0xff03ff03, 0xff800000,
+ 0xff800fff, 0xff801fff, 0xff87ffff, 0xffc0003f, 0xffc007ff, 0xffcfffcf,
+ 0xffe00003, 0xffe1ffff, 0xfff0001f, 0xfff07fff, 0xfff80007, 0xfff87fff,
+ 0xfffc00ff, 0xfffe07ff, 0xffff00ff, 0xffffc001, 0xfffff007, 0xfffff3ff,
+ 0xfffff807, 0xfffff9ff, 0xfffffc0f, 0xfffffeff};
+
+
+// ARM64 arithmetic instructions.
+static const MachInst2 kAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
+ {&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
+ {&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64}};
+
+
+// ARM64 Add/Sub immediates: 12-bit immediate optionally shifted by 12.
+// Below is a combination of a random subset and some edge values.
+static const int32_t kAddSubImmediates[] = {
+ 0, 1, 69, 493, 599, 701, 719,
+ 768, 818, 842, 945, 1246, 1286, 1429,
+ 1669, 2171, 2179, 2182, 2254, 2334, 2338,
+ 2343, 2396, 2449, 2610, 2732, 2855, 2876,
+ 2944, 3377, 3458, 3475, 3476, 3540, 3574,
+ 3601, 3813, 3871, 3917, 4095, 4096, 16384,
+ 364544, 462848, 970752, 1523712, 1863680, 2363392, 3219456,
+ 3280896, 4247552, 4526080, 4575232, 4960256, 5505024, 5894144,
+ 6004736, 6193152, 6385664, 6795264, 7114752, 7233536, 7348224,
+ 7499776, 7573504, 7729152, 8634368, 8937472, 9465856, 10354688,
+ 10682368, 11059200, 11460608, 13168640, 13176832, 14336000, 15028224,
+ 15597568, 15892480, 16773120};
+
+
+// ARM64 flag setting data processing instructions.
+static const MachInst2 kDPFlagSetInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32}};
+
+
+// ARM64 arithmetic with overflow instructions.
+static const MachInst2 kOvfAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+ kArm64Add32, kMachInt32},
+ {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+ kArm64Sub32, kMachInt32}};
+
+
+// ARM64 shift instructions.
+static const MachInst2 kShiftInstructions[] = {
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Shl32, kMachInt32},
+ {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Shl, kMachInt64},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Shr32, kMachInt32},
+ {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Shr, kMachInt64},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Sar32, kMachInt32},
+ {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Sar, kMachInt64},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+ {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
+
+
+// ARM64 Mul/Div instructions.
+static const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
+ {&RawMachineAssembler::Int32UDiv, "Int32UDiv", kArm64Udiv32, kMachInt32},
+ {&RawMachineAssembler::Int64UDiv, "Int64UDiv", kArm64Udiv, kMachInt64}};
+
+
+// ARM64 FP arithmetic instructions.
+static const MachInst2 kFPArithInstructions[] = {
+ {&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
+ kMachFloat64},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
+ kMachFloat64},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
+ kMachFloat64},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
+ kMachFloat64}};
+
+
+struct FPCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
+ return os << cmp.mi;
+}
+
+
+// ARM64 FP comparison instructions.
+static const FPCmp kFPCmpInstructions[] = {
+ {{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
+ kMachFloat64},
+ kUnorderedEqual},
+ {{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+ kArm64Float64Cmp, kMachFloat64},
+ kUnorderedLessThan},
+ {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kArm64Float64Cmp, kMachFloat64},
+ kUnorderedLessThanOrEqual}};
+
+
+struct Conversion {
+ // The machine_type field in MachInst1 represents the destination type.
+ MachInst1 mi;
+ MachineType src_machine_type;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
+ return os << conv.mi;
+}
+
+
+// ARM64 type conversion instructions.
+static const Conversion kConversionInstructions[] = {
+ {{&RawMachineAssembler::ChangeFloat32ToFloat64, "ChangeFloat32ToFloat64",
+ kArm64Float32ToFloat64, kMachFloat64},
+ kMachFloat32},
+ {{&RawMachineAssembler::TruncateFloat64ToFloat32,
+ "TruncateFloat64ToFloat32", kArm64Float64ToFloat32, kMachFloat32},
+ kMachFloat64},
+ {{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
+ kArm64Sxtw, kMachInt64},
+ kMachInt32},
+ {{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
+ kArm64Mov32, kMachUint64},
+ kMachUint32},
+ {{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
+ kArm64Mov32, kMachInt32},
+ kMachInt64},
+ {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+ kArm64Int32ToFloat64, kMachFloat64},
+ kMachInt32},
+ {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+ kArm64Uint32ToFloat64, kMachFloat64},
+ kMachUint32},
+ {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+ kArm64Float64ToInt32, kMachInt32},
+ kMachFloat64},
+ {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+ kArm64Float64ToUint32, kMachUint32},
+ kMachFloat64}};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Logical instructions.
+
+
+// Parameterized over the MachInst2 entries in kLogicalInstructions
+// (and/or/xor in 32- and 64-bit forms).
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorLogicalTest;
+
+
+// Both operands in registers: expect a single instruction carrying the
+// logical op's arch opcode, with two inputs and one output.
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// An encodable ARM64 logical immediate should be selected as the second
+// instruction input whether it appears on the right or (by commuting) on
+// the left of the operation.
+TEST_P(InstructionSelectorLogicalTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  // TODO(all): Add support for testing 64-bit immediates.
+  if (type == kMachInt32) {
+    // Immediate on the right.
+    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+      StreamBuilder m(this, type, type);
+      m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+
+    // Immediate on the left; all logical ops should commute.
+    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+      StreamBuilder m(this, type, type);
+      m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+                        ::testing::ValuesIn(kLogicalInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Add and Sub instructions.
+
+// Parameterized over kAddSubInstructions (Int32/Int64 Add and Sub).
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorAddSubTest;
+
+
+// Register-register add/sub selects a single instruction.
+TEST_P(InstructionSelectorAddSubTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// An encodable add/sub immediate on the right is used directly as the
+// second instruction input.
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Immediate on the left: only Add is expected to commute it into the
+// immediate operand slot; Sub is checked separately (SubZeroOnLeft).
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnLeft) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+
+    // Add can support an immediate on the left by commuting, but Sub can't
+    // commute. We test zero-on-left Sub later.
+    if (strstr(dpi.constructor_name, "Add") != NULL) {
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
+                        ::testing::ValuesIn(kAddSubInstructions));
+
+
+// (0 - x) should be selected as a single-input negate (Neg/Neg32).
+TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
+  // Subtraction with zero on the left maps to Neg.
+  {
+    // 32-bit subtract.
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
+    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    // 64-bit subtract.
+    StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+    m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
+    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Data processing controlled branches.
+
+
+// Parameterized over kDPFlagSetInstructions: data-processing ops whose
+// result is consumed by a branch, so they are selected in flag-setting form.
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorDPFlagSetTest;
+
+
+// Branching on a data-processing result folds the branch into the op:
+// one instruction in kFlags_branch mode, taken when the result is non-zero.
+TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  MLabel a, b;
+  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorDPFlagSetTest,
+                        ::testing::ValuesIn(kDPFlagSetInstructions));
+
+
+// Branch on (x & imm): And-for-branch is selected as Tst32 (flags only,
+// no materialized result register).
+TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Branch on (x + imm): Add-for-branch is selected as Cmn32 (compare
+// negative, i.e. flag-setting add).
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Branch on (x - imm): Sub-for-branch is selected as Cmp32.
+TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Branch on (imm & x): And commutes, so Tst32 is still expected.
+TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_LE(1U, s[0]->InputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Branch on (imm + x): Add commutes, so Cmn32 is still expected.
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+    ASSERT_LE(1U, s[0]->InputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Add and subtract instructions with overflow.
+
+
+// Parameterized over kOvfAddSubInstructions: Int32Add/SubWithOverflow.
+// These nodes have two projections: 0 = value, 1 = overflow bit.
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorOvfAddSubTest;
+
+
+// Only the overflow projection is used: the op is selected in kFlags_set
+// mode with the kOverflow condition.
+TEST_P(InstructionSelectorOvfAddSubTest, OvfParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return(
+      m.Projection(1, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// As above, with an encodable immediate as the right operand.
+TEST_P(InstructionSelectorOvfAddSubTest, OvfImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return(m.Projection(
+        1, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Only the value projection is used: no flags are needed (kFlags_none).
+TEST_P(InstructionSelectorOvfAddSubTest, ValParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return(
+      m.Projection(0, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+// Value projection only, immediate on the right: still kFlags_none.
+TEST_P(InstructionSelectorOvfAddSubTest, ValImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return(m.Projection(
+        0, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+// Both projections used: a single instruction must produce two outputs
+// (value + materialized overflow flag).
+TEST_P(InstructionSelectorOvfAddSubTest, BothParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+  Stream s = m.Build();
+  ASSERT_LE(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// Both projections used, immediate on the right.
+TEST_P(InstructionSelectorOvfAddSubTest, BothImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Branching on the overflow projection fuses op and branch: kFlags_branch
+// with kOverflow. The 4 inputs are the two operands plus the branch targets.
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithParameters) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  MLabel a, b;
+  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Projection(1, n), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&b);
+  m.Return(m.Projection(0, n));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// Overflow branch with an immediate right operand.
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    MLabel a, b;
+    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorOvfAddSubTest,
+                        ::testing::ValuesIn(kOvfAddSubInstructions));
+
+
+// Immediate on the left of Int32AddWithOverflow commutes into the
+// immediate slot; overflow projection only.
+TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Immediate on the left, value projection only: no flags required.
+TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+// Immediate on the left, both projections used: two outputs.
+TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Immediate on the left, branching on overflow: fused op + branch.
+TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Shift instructions.
+
+
+// Parameterized over kShiftInstructions (shl/shr/sar/ror variants).
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorShiftTest;
+
+
+// Register-register shift selects a single instruction.
+TEST_P(InstructionSelectorShiftTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// Constant shift amounts in [0, width-1] are encoded as immediates.
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShiftInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Mul and Div instructions.
+
+
+// Parameterized over kMulDivInstructions (mul/div/mod variants).
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorMulDivTest;
+
+
+// Register-register mul/div selects a single instruction.
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+                        ::testing::ValuesIn(kMulDivInstructions));
+
+
+namespace {
+
+// Describes a multiply op plus the add/sub ops it can fuse with, and the
+// fused opcodes expected from selection (madd/msub/mneg families).
+struct MulDPInst {
+  const char* mul_constructor_name;  // printable name for test output
+  Node* (RawMachineAssembler::*mul_constructor)(Node*, Node*);
+  Node* (RawMachineAssembler::*add_constructor)(Node*, Node*);
+  Node* (RawMachineAssembler::*sub_constructor)(Node*, Node*);
+  ArchOpcode add_arch_opcode;  // expected for add(x, mul(y, z))
+  ArchOpcode sub_arch_opcode;  // expected for sub(x, mul(y, z))
+  ArchOpcode neg_arch_opcode;  // expected for mul((0 - x), y)
+  MachineType machine_type;
+};
+
+
+// Lets gtest print the parameter by its constructor name.
+std::ostream& operator<<(std::ostream& os, const MulDPInst& inst) {
+  return os << inst.mul_constructor_name;
+}
+
+}  // namespace
+
+
+// 32- and 64-bit multiply fusion table: Madd/Msub/Mneg expectations.
+static const MulDPInst kMulDPInstructions[] = {
+    {"Int32Mul", &RawMachineAssembler::Int32Mul, &RawMachineAssembler::Int32Add,
+     &RawMachineAssembler::Int32Sub, kArm64Madd32, kArm64Msub32, kArm64Mneg32,
+     kMachInt32},
+    {"Int64Mul", &RawMachineAssembler::Int64Mul, &RawMachineAssembler::Int64Add,
+     &RawMachineAssembler::Int64Sub, kArm64Madd, kArm64Msub, kArm64Mneg,
+     kMachInt64}};
+
+
+typedef InstructionSelectorTestWithParam<MulDPInst>
+    InstructionSelectorIntDPWithIntMulTest;
+
+
+// add(x, mul(y, z)) and add(mul(x, y), z) both fuse into a single
+// three-input multiply-add (madd) instruction.
+TEST_P(InstructionSelectorIntDPWithIntMulTest, AddWithMul) {
+  const MulDPInst mdpi = GetParam();
+  const MachineType type = mdpi.machine_type;
+  {
+    StreamBuilder m(this, type, type, type, type);
+    Node* n = (m.*mdpi.mul_constructor)(m.Parameter(1), m.Parameter(2));
+    m.Return((m.*mdpi.add_constructor)(m.Parameter(0), n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, type, type, type, type);
+    Node* n = (m.*mdpi.mul_constructor)(m.Parameter(0), m.Parameter(1));
+    m.Return((m.*mdpi.add_constructor)(n, m.Parameter(2)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// sub(x, mul(y, z)) fuses into multiply-subtract (msub). Note that
+// sub(mul(x, y), z) cannot fuse, so only the right-hand form is tested.
+TEST_P(InstructionSelectorIntDPWithIntMulTest, SubWithMul) {
+  const MulDPInst mdpi = GetParam();
+  const MachineType type = mdpi.machine_type;
+  {
+    StreamBuilder m(this, type, type, type, type);
+    Node* n = (m.*mdpi.mul_constructor)(m.Parameter(1), m.Parameter(2));
+    m.Return((m.*mdpi.sub_constructor)(m.Parameter(0), n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.sub_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// mul((0 - x), y) and mul(x, (0 - y)) fuse into multiply-negate (mneg).
+TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
+  const MulDPInst mdpi = GetParam();
+  const MachineType type = mdpi.machine_type;
+  {
+    StreamBuilder m(this, type, type, type);
+    Node* n =
+        (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(0));
+    m.Return((m.*mdpi.mul_constructor)(n, m.Parameter(1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, type, type, type);
+    Node* n =
+        (m.*mdpi.sub_constructor)(BuildConstant(m, type, 0), m.Parameter(1));
+    m.Return((m.*mdpi.mul_constructor)(m.Parameter(0), n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorIntDPWithIntMulTest,
+                        ::testing::ValuesIn(kMulDPInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Floating point instructions.
+
+// Parameterized over kFPArithInstructions.
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorFPArithTest;
+
+
+// Binary FP arithmetic selects a single instruction.
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+  const MachInst2 fpa = GetParam();
+  StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+  m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+                        ::testing::ValuesIn(kFPArithInstructions));
+
+
+// Parameterized over kFPCmpInstructions; FPCmp carries both the compare
+// MachInst2 and the expected flags condition.
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+// FP compares are selected in kFlags_set mode with the table's condition.
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+  const FPCmp cmp = GetParam();
+  StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+                        ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+// Parameterized over kConversionInstructions; Conversion pairs the
+// conversion op/result type with its source machine type.
+typedef InstructionSelectorTestWithParam<Conversion>
+    InstructionSelectorConversionTest;
+
+
+// Each unary conversion selects a single one-input instruction.
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+  const Conversion conv = GetParam();
+  StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+  m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorConversionTest,
+                        ::testing::ValuesIn(kConversionInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
+// Describes one machine type's load/store opcodes plus a set of byte
+// offsets used to exercise immediate-offset addressing.
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode ldr_opcode;   // expected load opcode
+  ArchOpcode str_opcode;   // expected store opcode
+  const int32_t immediates[20];  // index offsets to test
+};
+
+
+// Lets gtest print the parameter as its machine type.
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+}  // namespace
+
+
+// Per-type load/store opcodes and offset immediates. The offsets mix
+// unscaled values (-256..255) with multiples of the element size up to
+// the scaled-immediate limit for that access width.
+// NOTE(review): the 4-byte entries list "8196, 3276, 3280" after 8192 —
+// 3276/3280 break the otherwise ascending order; presumably 32760/32764
+// were intended. Harmless for the test, but worth confirming upstream.
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kArm64Ldrsb, kArm64Strb,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
+      2121, 2442, 4093, 4094, 4095}},
+    {kMachUint8, kArm64Ldrb, kArm64Strb,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
+      2121, 2442, 4093, 4094, 4095}},
+    {kMachInt16, kArm64Ldrsh, kArm64Strh,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
+      4100, 4242, 6786, 8188, 8190}},
+    {kMachUint16, kArm64Ldrh, kArm64Strh,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
+      4100, 4242, 6786, 8188, 8190}},
+    {kMachInt32, kArm64LdrW, kArm64StrW,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachUint32, kArm64LdrW, kArm64StrW,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachInt64, kArm64Ldr, kArm64Str,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}},
+    {kMachUint64, kArm64Ldr, kArm64Str,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}},
+    {kMachFloat32, kArm64LdrS, kArm64StrS,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachFloat64, kArm64LdrD, kArm64StrD,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}}};
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+// Load with a register index: expect register-register (MRR) addressing.
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// Load with a constant index: expect register-immediate (MRI) addressing
+// with the offset carried as the second input.
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Store with a register index: three inputs (base, index, value), no output.
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+// Store with a constant index: MRI addressing, offset as second input.
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Comparison instructions.
+
+// Word equality ops and the compare opcodes they are expected to select.
+static const MachInst2 kComparisonInstructions[] = {
+    {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
+    {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
+};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorComparisonTest;
+
+
+// Register-register equality selects a compare in kFlags_set mode with
+// the kEqual condition.
+TEST_P(InstructionSelectorComparisonTest, WithParameters) {
+  const MachInst2 cmp = GetParam();
+  const MachineType type = cmp.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+// Non-zero encodable immediates end up as the second compare input,
+// whether written on the right or the left of the equality.
+TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
+  const MachInst2 cmp = GetParam();
+  const MachineType type = cmp.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into a tst instruction; skip them here.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into a tst instruction; skip them here.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorComparisonTest,
+                        ::testing::ValuesIn(kComparisonInstructions));
+
+
+// (x == 0) selects Tst32 with the register duplicated in both inputs,
+// regardless of operand order.
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// 64-bit analogue of Word32EqualWithZero: (x == 0) selects Tst.
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous
+
+
+static const MachInst2 kLogicalWithNotRHSs[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Bic32, kMachInt32},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Bic, kMachInt64},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Orn32, kMachInt32},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Orn, kMachInt64},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eon32, kMachInt32},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon, kMachInt64}};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorLogicalWithNotRHSTest;
+
+
+TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
+ const MachInst2 inst = GetParam();
+ const MachineType type = inst.machine_type;
+ // Test cases where RHS is Xor(x, -1).
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return((m.*inst.constructor)(
+ m.Parameter(0), m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return((m.*inst.constructor)(
+ m.Parameter(0), m.Word64Xor(m.Parameter(1), m.Int64Constant(-1))));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return((m.*inst.constructor)(
+ m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)), m.Parameter(1)));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return((m.*inst.constructor)(
+ m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)), m.Parameter(1)));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // Test cases where RHS is Not(x).
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return(
+ (m.*inst.constructor)(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return(
+ (m.*inst.constructor)(m.Parameter(0), m.Word64Not(m.Parameter(1))));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ if (type == kMachInt32) {
+ m.Return(
+ (m.*inst.constructor)(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+ } else {
+ ASSERT_EQ(kMachInt64, type);
+ m.Return(
+ (m.*inst.constructor)(m.Word64Not(m.Parameter(0)), m.Parameter(1)));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalWithNotRHSTest,
+ ::testing::ValuesIn(kLogicalWithNotRHSs));
+
+
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Not(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word64NotWithParameter) {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Not(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Not, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 111ca2d956..8d7eee59cb 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -10,18 +10,21 @@ namespace internal {
namespace compiler {
enum ImmediateMode {
- kArithimeticImm, // 12 bit unsigned immediate shifted left 0 or 12 bits
- kShift32Imm, // 0 - 31
- kShift64Imm, // 0 -63
+ kArithmeticImm, // 12 bit unsigned immediate shifted left 0 or 12 bits
+ kShift32Imm, // 0 - 31
+ kShift64Imm, // 0 - 63
kLogical32Imm,
kLogical64Imm,
- kLoadStoreImm, // unsigned 9 bit or signed 7 bit
+ kLoadStoreImm8, // signed 8 bit or 12 bit unsigned scaled by access size
+ kLoadStoreImm16,
+ kLoadStoreImm32,
+ kLoadStoreImm64,
kNoImmediate
};
// Adds Arm64-specific methods for generating operands.
-class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
+class Arm64OperandGenerator FINAL : public OperandGenerator {
public:
explicit Arm64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
@@ -35,14 +38,12 @@ class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
bool CanBeImmediate(Node* node, ImmediateMode mode) {
int64_t value;
- switch (node->opcode()) {
- // TODO(turbofan): SMI number constants as immediates.
- case IrOpcode::kInt32Constant:
- value = ValueOf<int32_t>(node->op());
- break;
- default:
- return false;
- }
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
unsigned ignored;
switch (mode) {
case kLogical32Imm:
@@ -53,30 +54,33 @@ class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
case kLogical64Imm:
return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
&ignored, &ignored, &ignored);
- case kArithimeticImm:
+ case kArithmeticImm:
// TODO(dcarney): -values can be handled by instruction swapping
return Assembler::IsImmAddSub(value);
case kShift32Imm:
- return 0 <= value && value < 31;
+ return 0 <= value && value < 32;
case kShift64Imm:
- return 0 <= value && value < 63;
- case kLoadStoreImm:
- return (0 <= value && value < (1 << 9)) ||
- (-(1 << 6) <= value && value < (1 << 6));
+ return 0 <= value && value < 64;
+ case kLoadStoreImm8:
+ return IsLoadStoreImmediate(value, LSByte);
+ case kLoadStoreImm16:
+ return IsLoadStoreImmediate(value, LSHalfword);
+ case kLoadStoreImm32:
+ return IsLoadStoreImmediate(value, LSWord);
+ case kLoadStoreImm64:
+ return IsLoadStoreImmediate(value, LSDoubleWord);
case kNoImmediate:
return false;
}
return false;
}
-};
-
-static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
- Arm64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
+ private:
+ bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
+ return Assembler::IsImmLSScaled(value, size) ||
+ Assembler::IsImmLSUnscaled(value);
+ }
+};
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
@@ -91,9 +95,9 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Arm64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsDoubleRegister(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
}
@@ -107,11 +111,12 @@ static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
// Shared routine for multiple binary operations.
+template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- Int32BinopMatcher m(node);
+ Matcher m(node);
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
@@ -132,8 +137,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_NE(0, input_count);
DCHECK_NE(0, output_count);
- DCHECK_GE(ARRAY_SIZE(inputs), input_count);
- DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
@@ -142,54 +147,59 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
// Shared routine for multiple binary operations.
+template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode, ImmediateMode operand_mode) {
FlagsContinuation cont;
- VisitBinop(selector, node, opcode, operand_mode, &cont);
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = OpParameter<MachineType>(node);
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
-
- InstructionOperand* result = rep == kMachineFloat64
- ? g.DefineAsDoubleRegister(node)
- : g.DefineAsRegister(node);
-
ArchOpcode opcode;
+ ImmediateMode immediate_mode = kNoImmediate;
switch (rep) {
- case kMachineFloat64:
- opcode = kArm64Float64Load;
+ case kRepFloat32:
+ opcode = kArm64LdrS;
+ immediate_mode = kLoadStoreImm32;
break;
- case kMachineWord8:
- opcode = kArm64LoadWord8;
+ case kRepFloat64:
+ opcode = kArm64LdrD;
+ immediate_mode = kLoadStoreImm64;
break;
- case kMachineWord16:
- opcode = kArm64LoadWord16;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
+ immediate_mode = kLoadStoreImm8;
break;
- case kMachineWord32:
- opcode = kArm64LoadWord32;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
+ immediate_mode = kLoadStoreImm16;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord64:
- opcode = kArm64LoadWord64;
+ case kRepWord32:
+ opcode = kArm64LdrW;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kArm64Ldr;
+ immediate_mode = kLoadStoreImm64;
break;
default:
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(index, kLoadStoreImm)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
- g.UseRegister(base), g.UseImmediate(index));
- } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
- g.UseRegister(index), g.UseImmediate(base));
+ if (g.CanBeImmediate(index, immediate_mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
- g.UseRegister(base), g.UseRegister(index));
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
}
@@ -201,101 +211,175 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = store_rep.rep;
- if (store_rep.write_barrier_kind == kFullWriteBarrier) {
- DCHECK(rep == kMachineTagged);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
- g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
+ g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
temps);
return;
}
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
- InstructionOperand* val;
- if (rep == kMachineFloat64) {
- val = g.UseDoubleRegister(value);
- } else {
- val = g.UseRegister(value);
- }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
ArchOpcode opcode;
+ ImmediateMode immediate_mode = kNoImmediate;
switch (rep) {
- case kMachineFloat64:
- opcode = kArm64Float64Store;
+ case kRepFloat32:
+ opcode = kArm64StrS;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case kRepFloat64:
+ opcode = kArm64StrD;
+ immediate_mode = kLoadStoreImm64;
break;
- case kMachineWord8:
- opcode = kArm64StoreWord8;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kArm64Strb;
+ immediate_mode = kLoadStoreImm8;
break;
- case kMachineWord16:
- opcode = kArm64StoreWord16;
+ case kRepWord16:
+ opcode = kArm64Strh;
+ immediate_mode = kLoadStoreImm16;
break;
- case kMachineWord32:
- opcode = kArm64StoreWord32;
+ case kRepWord32:
+ opcode = kArm64StrW;
+ immediate_mode = kLoadStoreImm32;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord64:
- opcode = kArm64StoreWord64;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kArm64Str;
+ immediate_mode = kLoadStoreImm64;
break;
default:
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(index, kLoadStoreImm)) {
+ if (g.CanBeImmediate(index, immediate_mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), val);
- } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(index), g.UseImmediate(base), val);
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
- g.UseRegister(base), g.UseRegister(index), val);
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
+}
+
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+ ArchOpcode opcode, bool left_can_cover,
+ bool right_can_cover, ImmediateMode imm_mode) {
+ Arm64OperandGenerator g(selector);
+
+ // Map instruction to equivalent operation with inverted right input.
+ ArchOpcode inv_opcode = opcode;
+ switch (opcode) {
+ case kArm64And32:
+ inv_opcode = kArm64Bic32;
+ break;
+ case kArm64And:
+ inv_opcode = kArm64Bic;
+ break;
+ case kArm64Or32:
+ inv_opcode = kArm64Orn32;
+ break;
+ case kArm64Or:
+ inv_opcode = kArm64Orn;
+ break;
+ case kArm64Eor32:
+ inv_opcode = kArm64Eon32;
+ break;
+ case kArm64Eor:
+ inv_opcode = kArm64Eon;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+ if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+ Matcher mleft(m->left().node());
+ if (mleft.right().Is(-1)) {
+ // TODO(all): support shifted operand on right.
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->right().node()),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+
+ // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+ if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+ right_can_cover) {
+ Matcher mright(m->right().node());
+ if (mright.right().Is(-1)) {
+ // TODO(all): support shifted operand on right.
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()),
+ g.UseRegister(mright.left().node()));
+ return;
+ }
+ }
+
+ if (m->IsWord32Xor() && m->right().Is(-1)) {
+ selector->Emit(kArm64Not32, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()));
+ } else if (m->IsWord64Xor() && m->right().Is(-1)) {
+ selector->Emit(kArm64Not, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()));
+ } else {
+ VisitBinop<Matcher>(selector, node, opcode, imm_mode);
}
}
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kArm64And32, kLogical32Imm);
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kArm64And32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kLogical32Imm);
}
void InstructionSelector::VisitWord64And(Node* node) {
- VisitBinop(this, node, kArm64And, kLogical64Imm);
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kArm64And, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kLogical64Imm);
}
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kArm64Or32, kLogical32Imm);
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kLogical32Imm);
}
void InstructionSelector::VisitWord64Or(Node* node) {
- VisitBinop(this, node, kArm64Or, kLogical64Imm);
-}
-
-
-template <typename T>
-static void VisitXor(InstructionSelector* selector, Node* node,
- ArchOpcode xor_opcode, ArchOpcode not_opcode) {
- Arm64OperandGenerator g(selector);
- BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
- if (m.right().Is(-1)) {
- selector->Emit(not_opcode, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()));
- } else {
- VisitBinop(selector, node, xor_opcode, kLogical32Imm);
- }
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kArm64Or, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kLogical64Imm);
}
void InstructionSelector::VisitWord32Xor(Node* node) {
- VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32);
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kLogical32Imm);
}
void InstructionSelector::VisitWord64Xor(Node* node) {
- VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not);
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kLogical64Imm);
}
@@ -329,46 +413,164 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
}
-void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop(this, node, kArm64Add32, kArithimeticImm);
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}
-void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop(this, node, kArm64Add, kArithimeticImm);
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
-template <typename T>
-static void VisitSub(InstructionSelector* selector, Node* node,
- ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
- Arm64OperandGenerator g(selector);
- BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
- if (m.left().Is(0)) {
- selector->Emit(neg_opcode, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- } else {
- VisitBinop(selector, node, sub_opcode, kArithimeticImm);
+void InstructionSelector::VisitInt32Add(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ // Select Madd(x, y, z) for Add(Mul(x, y), z).
+ if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArm64Madd32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+ return;
+ }
+ // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+ if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArm64Madd32, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
}
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // Select Madd(x, y, z) for Add(Mul(x, y), z).
+ if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ Emit(kArm64Madd, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+ return;
+ }
+ // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+ if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ Emit(kArm64Madd, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
- VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+ if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArm64Msub32, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+
+ if (m.left().Is(0)) {
+ Emit(kArm64Neg32, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
+ }
}
void InstructionSelector::VisitInt64Sub(Node* node) {
- VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
+ Arm64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+
+ // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+ if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ Emit(kArm64Msub, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+
+ if (m.left().Is(0)) {
+ Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
+ }
}
void InstructionSelector::VisitInt32Mul(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+
+ // Select Mneg(x, y) for Mul(Sub(0, x), y).
+ if (mleft.left().Is(0)) {
+ Emit(kArm64Mneg32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ }
+
+ if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+
+ // Select Mneg(x, y) for Mul(x, Sub(0, y)).
+ if (mright.left().Is(0)) {
+ Emit(kArm64Mneg32, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
+
VisitRRR(this, kArm64Mul32, node);
}
void InstructionSelector::VisitInt64Mul(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+
+ if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+
+ // Select Mneg(x, y) for Mul(Sub(0, x), y).
+ if (mleft.left().Is(0)) {
+ Emit(kArm64Mneg, g.DefineAsRegister(node),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ }
+
+ if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+
+ // Select Mneg(x, y) for Mul(x, Sub(0, y)).
+ if (mright.left().Is(0)) {
+ Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
+
VisitRRR(this, kArm64Mul, node);
}
@@ -413,26 +615,23 @@ void InstructionSelector::VisitInt64UMod(Node* node) {
}
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
- VisitRR(this, kArm64Int32ToInt64, node);
-}
-
-
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
- VisitRR(this, kArm64Int64ToInt32, node);
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
+ Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArm64Uint32ToFloat64, g.DefineAsDoubleRegister(node),
+ Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
@@ -440,14 +639,39 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
- g.UseDoubleRegister(node->InputAt(0)));
+ g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
- g.UseDoubleRegister(node->InputAt(0)));
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
@@ -473,21 +697,28 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
- g.UseFixedDouble(node->InputAt(0), d0),
- g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+ Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
+ g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
- VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
- VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
}
@@ -516,10 +747,10 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, kArithimeticImm)) {
+ if (g.CanBeImmediate(right, kArithmeticImm)) {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
cont);
- } else if (g.CanBeImmediate(left, kArithimeticImm)) {
+ } else if (g.CanBeImmediate(left, kArithmeticImm)) {
if (!commutative) cont->Commute();
VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
cont);
@@ -532,6 +763,10 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
switch (node->opcode()) {
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
case IrOpcode::kWord32And:
return VisitWordCompare(this, node, kArm64Tst32, cont, true);
default:
@@ -574,8 +809,8 @@ void InstructionSelector::VisitFloat64Compare(Node* node,
Arm64OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
- g.UseDoubleRegister(right), cont);
+ VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
+ g.UseRegister(right), cont);
}
@@ -583,22 +818,24 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {
Arm64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(call, &buffer, true, false, continuation,
- deoptimization);
+ InitializeCallBuffer(call, &buffer, true, false);
// Push the arguments to the stack.
- bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
- bool pushed_count_uneven = buffer.pushed_count & 1;
- int aligned_push_count = buffer.pushed_count;
- if (is_c_frame && pushed_count_uneven) {
- aligned_push_count++;
- }
+ bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+ int aligned_push_count = buffer.pushed_nodes.size();
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -609,12 +846,11 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
}
// Move arguments to the stack.
{
- int slot = buffer.pushed_count - 1;
+ int slot = buffer.pushed_nodes.size() - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
- ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
- Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+ Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
slot--;
}
// Now all pushes can be done in pairs.
@@ -629,37 +865,28 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kArm64CallAddress;
- break;
case CallDescriptor::kCallJSFunction:
- opcode = kArm64CallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (is_c_frame && aligned_push_count > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
- }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/arm64/linkage-arm64.cc b/deps/v8/src/compiler/arm64/linkage-arm64.cc
index 186f2d59da..2be2cb1e47 100644
--- a/deps/v8/src/compiler/arm64/linkage-arm64.cc
+++ b/deps/v8/src/compiler/arm64/linkage-arm64.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-struct LinkageHelperTraits {
+struct Arm64LinkageHelperTraits {
static Register ReturnValueReg() { return x0; }
static Register ReturnValue2Reg() { return x1; }
static Register JSCallFunctionReg() { return x1; }
@@ -33,36 +33,34 @@ struct LinkageHelperTraits {
};
+typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
+
CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
- return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
- zone, parameter_count);
+ return LH::GetJSCallDescriptor(zone, parameter_count);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
- zone, function, parameter_count, properties, can_deoptimize);
+ Operator::Properties properties, Zone* zone) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
- zone, descriptor, stack_parameter_count, can_deoptimize);
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone) {
+ return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+ flags);
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types) {
- return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
- zone, num_params, return_type, param_types);
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 49a67157c7..03640780b6 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -6,6 +6,7 @@
#include "src/compiler.h"
#include "src/compiler/control-builders.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
#include "src/full-codegen.h"
@@ -30,7 +31,7 @@ AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph)
Node* AstGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
// Parameter -1 is special for the function closure
- Operator* op = common()->Parameter(-1);
+ const Operator* op = common()->Parameter(-1);
Node* node = NewNode(op, graph()->start());
function_closure_.set(node);
}
@@ -41,7 +42,7 @@ Node* AstGraphBuilder::GetFunctionClosure() {
Node* AstGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
// Parameter (arity + 1) is special for the outer context of the function
- Operator* op = common()->Parameter(info()->num_parameters() + 1);
+ const Operator* op = common()->Parameter(info()->num_parameters() + 1);
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
@@ -86,7 +87,8 @@ bool AstGraphBuilder::CreateGraph() {
VisitDeclarations(scope->declarations());
// TODO(mstarzinger): This should do an inlined stack check.
- NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+ Node* node = NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+ PrepareFrameState(node, BailoutId::FunctionEntry());
// Visit statements in the function body.
VisitStatements(info()->function()->body());
@@ -161,10 +163,7 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
locals_count_(scope->num_stack_slots()),
parameters_node_(NULL),
locals_node_(NULL),
- stack_node_(NULL),
- parameters_dirty_(true),
- locals_dirty_(true),
- stack_dirty_(true) {
+ stack_node_(NULL) {
DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
// Bind the receiver variable.
@@ -193,57 +192,49 @@ AstGraphBuilder::Environment::Environment(const Environment& copy)
locals_count_(copy.locals_count_),
parameters_node_(copy.parameters_node_),
locals_node_(copy.locals_node_),
- stack_node_(copy.stack_node_),
- parameters_dirty_(copy.parameters_dirty_),
- locals_dirty_(copy.locals_dirty_),
- stack_dirty_(copy.stack_dirty_) {}
-
-
-Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) {
- if (parameters_dirty_) {
- Operator* op = common()->StateValues(parameters_count());
- if (parameters_count() != 0) {
- Node** parameters = &values()->front();
- parameters_node_ = graph()->NewNode(op, parameters_count(), parameters);
- } else {
- parameters_node_ = graph()->NewNode(op);
- }
- parameters_dirty_ = false;
- }
- if (locals_dirty_) {
- Operator* op = common()->StateValues(locals_count());
- if (locals_count() != 0) {
- Node** locals = &values()->at(parameters_count_);
- locals_node_ = graph()->NewNode(op, locals_count(), locals);
- } else {
- locals_node_ = graph()->NewNode(op);
+ stack_node_(copy.stack_node_) {}
+
+
+void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+ int offset, int count) {
+ bool should_update = false;
+ Node** env_values = (count == 0) ? NULL : &values()->at(offset);
+ if (*state_values == NULL || (*state_values)->InputCount() != count) {
+ should_update = true;
+ } else {
+ DCHECK(static_cast<size_t>(offset + count) <= values()->size());
+ for (int i = 0; i < count; i++) {
+ if ((*state_values)->InputAt(i) != env_values[i]) {
+ should_update = true;
+ break;
+ }
}
- locals_dirty_ = false;
}
- if (stack_dirty_) {
- Operator* op = common()->StateValues(stack_height());
- if (stack_height() != 0) {
- Node** stack = &values()->at(parameters_count_ + locals_count_);
- stack_node_ = graph()->NewNode(op, stack_height(), stack);
- } else {
- stack_node_ = graph()->NewNode(op);
- }
- stack_dirty_ = false;
+ if (should_update) {
+ const Operator* op = common()->StateValues(count);
+ (*state_values) = graph()->NewNode(op, count, env_values);
}
+}
- Operator* op = common()->FrameState(ast_id);
- return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_);
+Node* AstGraphBuilder::Environment::Checkpoint(
+ BailoutId ast_id, OutputFrameStateCombine combine) {
+ UpdateStateValues(&parameters_node_, 0, parameters_count());
+ UpdateStateValues(&locals_node_, parameters_count(), locals_count());
+ UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
+ stack_height());
+
+ const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
+
+ return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_,
+ GetContext(),
+ builder()->jsgraph()->UndefinedConstant());
}
AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
- Expression::Context kind,
- BailoutId bailout_id)
- : bailout_id_(bailout_id),
- kind_(kind),
- owner_(own),
- outer_(own->ast_context()) {
+ Expression::Context kind)
+ : kind_(kind), owner_(own), outer_(own->ast_context()) {
owner()->set_ast_context(this); // Push.
#ifdef DEBUG
original_height_ = environment()->stack_height();
@@ -271,28 +262,6 @@ AstGraphBuilder::AstTestContext::~AstTestContext() {
}
-void AstGraphBuilder::AstEffectContext::ProduceValueWithLazyBailout(
- Node* value) {
- ProduceValue(value);
- owner()->BuildLazyBailout(value, bailout_id_);
-}
-
-
-void AstGraphBuilder::AstValueContext::ProduceValueWithLazyBailout(
- Node* value) {
- ProduceValue(value);
- owner()->BuildLazyBailout(value, bailout_id_);
-}
-
-
-void AstGraphBuilder::AstTestContext::ProduceValueWithLazyBailout(Node* value) {
- environment()->Push(value);
- owner()->BuildLazyBailout(value, bailout_id_);
- environment()->Pop();
- ProduceValue(value);
-}
-
-
void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
// The value is ignored.
}
@@ -359,7 +328,7 @@ void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
void AstGraphBuilder::VisitForValue(Expression* expr) {
- AstValueContext for_value(this, expr->id());
+ AstValueContext for_value(this);
if (!HasStackOverflow()) {
expr->Accept(this);
}
@@ -367,7 +336,7 @@ void AstGraphBuilder::VisitForValue(Expression* expr) {
void AstGraphBuilder::VisitForEffect(Expression* expr) {
- AstEffectContext for_effect(this, expr->id());
+ AstEffectContext for_effect(this);
if (!HasStackOverflow()) {
expr->Accept(this);
}
@@ -375,7 +344,7 @@ void AstGraphBuilder::VisitForEffect(Expression* expr) {
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this, expr->id());
+ AstTestContext for_condition(this);
if (!HasStackOverflow()) {
expr->Accept(this);
}
@@ -405,7 +374,7 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
case Variable::CONTEXT:
if (hole_init) {
Node* value = jsgraph()->TheHoleConstant();
- Operator* op = javascript()->StoreContext(0, variable->index());
+ const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
}
break;
@@ -437,7 +406,7 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case Variable::CONTEXT: {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
- Operator* op = javascript()->StoreContext(0, variable->index());
+ const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
break;
}
@@ -484,7 +453,7 @@ void AstGraphBuilder::VisitBlock(Block* stmt) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
} else {
- Operator* op = javascript()->CreateBlockContext();
+ const Operator* op = javascript()->CreateBlockContext();
Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
Node* context = NewNode(op, scope_info, GetFunctionClosure());
ContextScope scope(this, stmt->scope(), context);
@@ -550,7 +519,7 @@ void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
VisitForValue(stmt->expression());
Node* value = environment()->Pop();
- Operator* op = javascript()->CreateWithContext();
+ const Operator* op = javascript()->CreateWithContext();
Node* context = NewNode(op, value, GetFunctionClosure());
ContextScope scope(this, stmt->scope(), context);
Visit(stmt->statement());
@@ -582,7 +551,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// value is still on the operand stack while the label is evaluated.
VisitForValue(clause->label());
Node* label = environment()->Pop();
- Operator* op = javascript()->StrictEqual();
+ const Operator* op = javascript()->StrictEqual();
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -713,7 +682,7 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
Node* exit_cond =
NewNode(javascript()->LessThan(), index, cache_length);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(exit_cond, BailoutId::None());
+ PrepareFrameState(exit_cond, BailoutId::None());
for_loop.BreakUnless(exit_cond);
// TODO(dcarney): this runtime call should be a handful of
// simplified instructions that
@@ -737,13 +706,12 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
test_should_filter.If(should_filter_cond);
test_should_filter.Then();
value = environment()->Pop();
- // TODO(dcarney): Better load from function context.
- // See comment in BuildLoadBuiltinsObject.
- Handle<JSFunction> function(JSFunction::cast(
- info()->context()->builtins()->javascript_builtin(
- Builtins::FILTER_KEY)));
+ Node* builtins = BuildLoadBuiltinsObject();
+ Node* function = BuildLoadObjectField(
+ builtins,
+ JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
// Callee.
- environment()->Push(jsgraph()->HeapConstant(function));
+ environment()->Push(function);
// Receiver.
environment()->Push(obj);
// Args.
@@ -753,7 +721,7 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
Node* res = ProcessArguments(
javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(res, BailoutId::None());
+ PrepareFrameState(res, BailoutId::None());
Node* property_missing = NewNode(javascript()->StrictEqual(), res,
jsgraph()->ZeroConstant());
{
@@ -763,9 +731,9 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Inc counter and continue.
Node* index_inc =
NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- environment()->Poke(0, index_inc);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(index_inc, BailoutId::None());
+ PrepareFrameState(index_inc, BailoutId::None());
+ environment()->Poke(0, index_inc);
for_loop.Continue();
is_property_missing.Else();
is_property_missing.End();
@@ -779,13 +747,13 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Bind value and do loop body.
VisitForInAssignment(stmt->each(), value);
VisitIterationBody(stmt, &for_loop, 5);
+ for_loop.EndBody();
// Inc counter and continue.
Node* index_inc =
NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- environment()->Poke(0, index_inc);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(index_inc, BailoutId::None());
- for_loop.EndBody();
+ PrepareFrameState(index_inc, BailoutId::None());
+ environment()->Poke(0, index_inc);
for_loop.EndLoop();
environment()->Drop(5);
// PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -817,7 +785,8 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
// TODO(turbofan): Do we really need a separate reloc-info for this?
- NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+ Node* node = NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+ PrepareFrameState(node, stmt->DebugBreakId());
}
@@ -837,12 +806,18 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
Node* info = jsgraph()->Constant(shared_info);
Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
: jsgraph()->FalseConstant();
- Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+ const Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
Node* value = NewNode(op, context, info, pretenure);
ast_context()->ProduceValue(value);
}
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
+ // TODO(arv): Implement.
+ UNREACHABLE();
+}
+
+
void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
UNREACHABLE();
}
@@ -875,29 +850,32 @@ void AstGraphBuilder::VisitLiteral(Literal* expr) {
void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- Handle<JSFunction> closure = info()->closure();
+ Node* closure = GetFunctionClosure();
// Create node to materialize a regular expression literal.
- Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literals_array =
+ BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* literal_index = jsgraph()->Constant(expr->literal_index());
Node* pattern = jsgraph()->Constant(expr->pattern());
Node* flags = jsgraph()->Constant(expr->flags());
- Operator* op = javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+ const Operator* op =
+ javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
ast_context()->ProduceValue(literal);
}
void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- Handle<JSFunction> closure = info()->closure();
+ Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
expr->BuildConstantProperties(isolate());
- Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literals_array =
+ BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* literal_index = jsgraph()->Constant(expr->literal_index());
Node* constants = jsgraph()->Constant(expr->constant_properties());
Node* flags = jsgraph()->Constant(expr->ComputeFlags());
- Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+ const Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
// The object is expected on the operand stack during computation of the
@@ -929,10 +907,10 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForValue(property->value());
Node* value = environment()->Pop();
- PrintableUnique<Name> name = MakeUnique(key->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(name), literal, value);
- BuildLazyBailout(store, key->id());
+ Unique<Name> name = MakeUnique(key->AsPropertyName());
+ Node* store = NewNode(javascript()->StoreNamed(strict_mode(), name),
+ literal, value);
+ PrepareFrameState(store, key->id());
} else {
VisitForEffect(property->value());
}
@@ -946,7 +924,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* receiver = environment()->Pop();
if (property->emit_store()) {
Node* strict = jsgraph()->Constant(SLOPPY);
- Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+ const Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
NewNode(op, receiver, key, value, strict);
}
break;
@@ -957,7 +935,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
if (property->emit_store()) {
- Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
+ const Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
NewNode(op, receiver, value);
}
break;
@@ -982,14 +960,15 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* getter = environment()->Pop();
Node* name = environment()->Pop();
Node* attr = jsgraph()->Constant(NONE);
- Operator* op =
+ const Operator* op =
javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
- NewNode(op, literal, name, getter, setter, attr);
+ Node* call = NewNode(op, literal, name, getter, setter, attr);
+ PrepareFrameState(call, it->first->id());
}
// Transform literals that contain functions to fast properties.
if (expr->has_function()) {
- Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+ const Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
NewNode(op, literal);
}
@@ -998,15 +977,16 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- Handle<JSFunction> closure = info()->closure();
+ Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
expr->BuildConstantElements(isolate());
- Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literals_array =
+ BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* literal_index = jsgraph()->Constant(expr->literal_index());
Node* constants = jsgraph()->Constant(expr->constant_elements());
Node* flags = jsgraph()->Constant(expr->ComputeFlags());
- Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+ const Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
// The array and the literal index are both expected on the operand stack
@@ -1023,8 +1003,9 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForValue(subexpr);
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(i);
- Node* store = NewNode(javascript()->StoreProperty(), literal, index, value);
- BuildLazyBailout(store, expr->GetIdForElement(i));
+ Node* store = NewNode(javascript()->StoreProperty(strict_mode()), literal,
+ index, value);
+ PrepareFrameState(store, expr->GetIdForElement(i));
}
environment()->Pop(); // Array literal index.
@@ -1052,11 +1033,12 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
VisitForValue(property->obj());
Node* object = environment()->Pop();
value = environment()->Pop();
- PrintableUnique<Name> name =
+ Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(name), object, value);
+ Node* store =
+ NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
// TODO(jarin) Fill in the correct bailout id.
- BuildLazyBailout(store, BailoutId::None());
+ PrepareFrameState(store, BailoutId::None());
break;
}
case KEYED_PROPERTY: {
@@ -1066,9 +1048,10 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
+ Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+ key, value);
// TODO(jarin) Fill in the correct bailout id.
- BuildLazyBailout(store, BailoutId::None());
+ PrepareFrameState(store, BailoutId::None());
break;
}
}
@@ -1109,17 +1092,17 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
}
case NAMED_PROPERTY: {
Node* object = environment()->Top();
- PrintableUnique<Name> name =
+ Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
old_value = NewNode(javascript()->LoadNamed(name), object);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), kPushOutput);
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
old_value = NewNode(javascript()->LoadProperty(), object, key);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), kPushOutput);
break;
}
}
@@ -1128,8 +1111,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->binary_op());
+ PrepareFrameState(value, expr->binary_operation()->id(), kPushOutput);
environment()->Push(value);
- BuildLazyBailout(value, expr->binary_operation()->id());
} else {
VisitForValue(expr->value());
}
@@ -1145,17 +1128,19 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
- PrintableUnique<Name> name =
+ Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(name), object, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ Node* store =
+ NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+ PrepareFrameState(store, expr->AssignmentId());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+ key, value);
+ PrepareFrameState(store, expr->AssignmentId());
break;
}
}
@@ -1177,7 +1162,7 @@ void AstGraphBuilder::VisitYield(Yield* expr) {
void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
- Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
+ const Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
Node* value = NewNode(op, exception);
ast_context()->ProduceValue(value);
}
@@ -1188,8 +1173,7 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
if (expr->key()->IsPropertyName()) {
VisitForValue(expr->obj());
Node* object = environment()->Pop();
- PrintableUnique<Name> name =
- MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
+ Unique<Name> name = MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
value = NewNode(javascript()->LoadNamed(name), object);
} else {
VisitForValue(expr->obj());
@@ -1198,7 +1182,8 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
Node* object = environment()->Pop();
value = NewNode(javascript()->LoadProperty(), object, key);
}
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
@@ -1223,7 +1208,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Variable* variable = callee->AsVariableProxy()->var();
DCHECK(variable->location() == Variable::LOOKUP);
Node* name = jsgraph()->Constant(variable->name());
- Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
+ const Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
Node* pair = NewNode(op, current_context(), name);
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
@@ -1234,7 +1219,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
VisitForValue(property->obj());
Node* object = environment()->Top();
if (property->key()->IsPropertyName()) {
- PrintableUnique<Name> name =
+ Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
callee_value = NewNode(javascript()->LoadNamed(name), object);
} else {
@@ -1242,7 +1227,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* key = environment()->Pop();
callee_value = NewNode(javascript()->LoadProperty(), object, key);
}
- BuildLazyBailoutWithPushedNode(callee_value, property->LoadId());
+ PrepareFrameState(callee_value, property->LoadId(), kPushOutput);
receiver_value = environment()->Pop();
// Note that a PROPERTY_CALL requires the receiver to be wrapped into an
// object for sloppy callees. This could also be modeled explicitly here,
@@ -1283,7 +1268,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* receiver = environment()->Lookup(info()->scope()->receiver());
Node* strict = jsgraph()->Constant(strict_mode());
Node* position = jsgraph()->Constant(info()->scope()->start_position());
- Operator* op =
+ const Operator* op =
javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 5);
Node* pair = NewNode(op, callee, source, receiver, strict, position);
Node* new_callee = NewNode(common()->Projection(0), pair);
@@ -1295,9 +1280,10 @@ void AstGraphBuilder::VisitCall(Call* expr) {
}
// Create node to perform the function call.
- Operator* call = javascript()->Call(args->length() + 2, flags);
+ const Operator* call = javascript()->Call(args->length() + 2, flags);
Node* value = ProcessArguments(call, args->length() + 2);
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
@@ -1309,9 +1295,10 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
VisitForValues(args);
// Create node to perform the construct call.
- Operator* call = javascript()->CallNew(args->length() + 1);
+ const Operator* call = javascript()->CallNew(args->length() + 1);
Node* value = ProcessArguments(call, args->length() + 1);
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
@@ -1322,12 +1309,12 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// before arguments are being evaluated.
CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
Node* receiver_value = BuildLoadBuiltinsObject();
- PrintableUnique<String> unique = MakeUnique(name);
+ Unique<String> unique = MakeUnique(name);
Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
- environment()->Push(callee_value);
// TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
// refuses to optimize functions with jsruntime calls).
- BuildLazyBailout(callee_value, BailoutId::None());
+ PrepareFrameState(callee_value, BailoutId::None(), kPushOutput);
+ environment()->Push(callee_value);
environment()->Push(receiver_value);
// Evaluate all arguments to the JS runtime call.
@@ -1335,9 +1322,10 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
VisitForValues(args);
// Create node to perform the JS runtime call.
- Operator* call = javascript()->Call(args->length() + 2, flags);
+ const Operator* call = javascript()->Call(args->length() + 2, flags);
Node* value = ProcessArguments(call, args->length() + 2);
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
@@ -1357,9 +1345,10 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// Create node to perform the runtime call.
Runtime::FunctionId functionId = function->function_id;
- Operator* call = javascript()->Runtime(functionId, args->length());
+ const Operator* call = javascript()->Runtime(functionId, args->length());
Node* value = ProcessArguments(call, args->length());
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
@@ -1403,10 +1392,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
VisitForValue(property->obj());
Node* object = environment()->Top();
- PrintableUnique<Name> name =
+ Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
old_value = NewNode(javascript()->LoadNamed(name), object);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), kPushOutput);
stack_depth = 1;
break;
}
@@ -1416,7 +1405,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
old_value = NewNode(javascript()->LoadProperty(), object, key);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), kPushOutput);
stack_depth = 2;
break;
}
@@ -1433,29 +1422,37 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
// TODO(jarin) Insert proper bailout id here (will need to change
// full code generator).
- BuildLazyBailout(value, BailoutId::None());
+ PrepareFrameState(value, BailoutId::None());
// Store the value.
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
+ environment()->Push(value);
BuildVariableAssignment(variable, value, expr->op(),
expr->AssignmentId());
+ environment()->Pop();
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
- PrintableUnique<Name> name =
+ Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(name), object, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ Node* store =
+ NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+ environment()->Push(value);
+ PrepareFrameState(store, expr->AssignmentId());
+ environment()->Pop();
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+ key, value);
+ environment()->Push(value);
+ PrepareFrameState(store, expr->AssignmentId());
+ environment()->Pop();
break;
}
}
@@ -1480,14 +1477,15 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->op());
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
}
}
void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- Operator* op;
+ const Operator* op;
switch (expr->op()) {
case Token::EQ:
op = javascript()->Equal();
@@ -1528,9 +1526,8 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
-
- BuildLazyBailout(value, expr->id());
}
@@ -1540,6 +1537,11 @@ void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
+void AstGraphBuilder::VisitSuperReference(SuperReference* expr) {
+ UNREACHABLE();
+}
+
+
void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
@@ -1552,10 +1554,10 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsStrictMode::encode(info()->strict_mode());
+ DeclareGlobalsStrictMode::encode(strict_mode());
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
- Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
+ const Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
NewNode(op, current_context(), pairs, flags);
globals()->Rewind(0);
}
@@ -1662,9 +1664,9 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
}
-Node* AstGraphBuilder::ProcessArguments(Operator* op, int arity) {
+Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
DCHECK(environment()->stack_height() >= arity);
- Node** all = info()->zone()->NewArray<Node*>(arity); // XXX: alloca?
+ Node** all = info()->zone()->NewArray<Node*>(arity);
for (int i = arity - 1; i >= 0; --i) {
all[i] = environment()->Pop();
}
@@ -1679,7 +1681,7 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
set_current_context(context);
// Allocate a new local context.
- Operator* op = javascript()->CreateFunctionContext();
+ const Operator* op = javascript()->CreateFunctionContext();
Node* local_context = NewNode(op, closure);
set_current_context(local_context);
@@ -1693,7 +1695,7 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start());
// Context variable (at bottom of the context chain).
DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
- Operator* op = javascript()->StoreContext(0, variable->index());
+ const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, local_context, parameter);
}
@@ -1706,7 +1708,7 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
// Allocate and initialize a new arguments object.
Node* callee = GetFunctionClosure();
- Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
+ const Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
Node* object = NewNode(op, callee);
// Assign the object to the arguments variable.
@@ -1757,10 +1759,10 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
case Variable::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
- PrintableUnique<Name> name = MakeUnique(variable->name());
- Operator* op = javascript()->LoadNamed(name, contextual_mode);
+ Unique<Name> name = MakeUnique(variable->name());
+ const Operator* op = javascript()->LoadNamed(name, contextual_mode);
Node* node = NewNode(op, global);
- BuildLazyBailoutWithPushedNode(node, bailout_id);
+ PrepareFrameState(node, bailout_id, kPushOutput);
return node;
}
case Variable::PARAMETER:
@@ -1789,7 +1791,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
bool immutable = variable->maybe_assigned() == kNotAssigned;
- Operator* op =
+ const Operator* op =
javascript()->LoadContext(depth, variable->index(), immutable);
Node* value = NewNode(op, current_context());
// TODO(titzer): initialization checks are redundant for already
@@ -1812,7 +1814,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
(contextual_mode == CONTEXTUAL)
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- Operator* op = javascript()->Runtime(function_id, 2);
+ const Operator* op = javascript()->Runtime(function_id, 2);
Node* pair = NewNode(op, current_context(), name);
return NewNode(common()->Projection(0), pair);
}
@@ -1828,7 +1830,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Node* name = jsgraph()->Constant(variable->name());
- Operator* op = javascript()->DeleteProperty(strict_mode());
+ const Operator* op = javascript()->DeleteProperty(strict_mode());
return NewNode(op, global, name);
}
case Variable::PARAMETER:
@@ -1840,7 +1842,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
case Variable::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
- Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
+ const Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
return NewNode(op, current_context(), name);
}
}
@@ -1858,10 +1860,10 @@ Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
case Variable::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
- PrintableUnique<Name> name = MakeUnique(variable->name());
- Operator* op = javascript()->StoreNamed(name);
+ Unique<Name> name = MakeUnique(variable->name());
+ const Operator* op = javascript()->StoreNamed(strict_mode(), name);
Node* store = NewNode(op, global, value);
- BuildLazyBailout(store, bailout_id);
+ PrepareFrameState(store, bailout_id);
return store;
}
case Variable::PARAMETER:
@@ -1898,7 +1900,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
int depth = current_scope()->ContextChainLength(variable->scope());
if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
// Perform an initialization check for legacy const variables.
- Operator* op =
+ const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckSilent(current, value, current);
@@ -1907,7 +1909,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
return value;
} else if (mode == LET && op != Token::INIT_LET) {
// Perform an initialization check for let declared variables.
- Operator* op =
+ const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckThrow(current, variable, value);
@@ -1915,7 +1917,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
// All assignments to const variables are early errors.
UNREACHABLE();
}
- Operator* op = javascript()->StoreContext(depth, variable->index());
+ const Operator* op = javascript()->StoreContext(depth, variable->index());
return NewNode(op, current_context(), value);
}
case Variable::LOOKUP: {
@@ -1924,7 +1926,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
Node* strict = jsgraph()->Constant(strict_mode());
// TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
// initializations of const declarations.
- Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
+ const Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
return NewNode(op, value, current_context(), name, strict);
}
}
@@ -1933,25 +1935,27 @@ Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
}
+Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
+ // TODO(sigurds) Use simplified load here once it is ready.
+ Node* field_load = NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ jsgraph()->Int32Constant(offset - kHeapObjectTag));
+ return field_load;
+}
+
+
Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
- // TODO(mstarzinger): Better load from function context, otherwise optimized
- // code cannot be shared across native contexts.
- return jsgraph()->Constant(handle(info()->context()->builtins()));
+ Node* global = BuildLoadGlobalObject();
+ Node* builtins =
+ BuildLoadObjectField(global, JSGlobalObject::kBuiltinsOffset);
+ return builtins;
}
Node* AstGraphBuilder::BuildLoadGlobalObject() {
-#if 0
Node* context = GetFunctionContext();
- // TODO(mstarzinger): Use mid-level operator on FixedArray instead of the
- // JS-level operator that targets JSObject.
- Node* index = jsgraph()->Constant(Context::GLOBAL_OBJECT_INDEX);
- return NewNode(javascript()->LoadProperty(), context, index);
-#else
- // TODO(mstarzinger): Better load from function context, otherwise optimized
- // code cannot be shared across native contexts. See unused code above.
- return jsgraph()->Constant(handle(info()->context()->global_object()));
-#endif
+ const Operator* load_op =
+ javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
+ return NewNode(load_op, context);
}
@@ -1964,13 +1968,13 @@ Node* AstGraphBuilder::BuildToBoolean(Node* value) {
Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
// TODO(mstarzinger): Should be unified with the VisitThrow implementation.
Node* variable_name = jsgraph()->Constant(variable->name());
- Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
+ const Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
return NewNode(op, variable_name);
}
Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
- Operator* js_op;
+ const Operator* js_op;
switch (op) {
case Token::BIT_OR:
js_op = javascript()->BitwiseOr();
@@ -2013,43 +2017,16 @@ Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
}
-void AstGraphBuilder::BuildLazyBailout(Node* node, BailoutId ast_id) {
- if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
- // The deopting node should have an outgoing control dependency.
- DCHECK(environment()->GetControlDependency() == node);
-
- StructuredGraphBuilder::Environment* continuation_env = environment();
- // Create environment for the deoptimization block, and build the block.
- StructuredGraphBuilder::Environment* deopt_env =
- CopyEnvironment(continuation_env);
- set_environment(deopt_env);
-
- NewNode(common()->LazyDeoptimization());
-
- // TODO(jarin) If ast_id.IsNone(), perhaps we should generate an empty
- // deopt block and make sure there is no patch entry for this (so
- // that the deoptimizer dies when trying to deoptimize here).
-
- Node* state_node = environment()->Checkpoint(ast_id);
-
- Node* deoptimize_node = NewNode(common()->Deoptimize(), state_node);
-
- UpdateControlDependencyToLeaveFunction(deoptimize_node);
-
- // Continue with the original environment.
- set_environment(continuation_env);
-
- NewNode(common()->Continuation());
+void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine combine) {
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ DCHECK(NodeProperties::GetFrameStateInput(node)->opcode() ==
+ IrOpcode::kDead);
+ NodeProperties::ReplaceFrameStateInput(
+ node, environment()->Checkpoint(ast_id, combine));
}
}
-
-void AstGraphBuilder::BuildLazyBailoutWithPushedNode(Node* node,
- BailoutId ast_id) {
- environment()->Push(node);
- BuildLazyBailout(node, ast_id);
- environment()->Pop();
-}
}
}
} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 861bd5baa3..6a7e3db90e 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -88,6 +88,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
Node* BuildLoadBuiltinsObject();
Node* BuildLoadGlobalObject();
Node* BuildLoadClosure();
+ Node* BuildLoadObjectField(Node* object, int offset);
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* value);
@@ -139,7 +140,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
- Node* ProcessArguments(Operator* op, int arity);
+ Node* ProcessArguments(const Operator* op, int arity);
// Visit statements.
void VisitIfNotNull(Statement* stmt);
@@ -171,8 +172,11 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value);
- void BuildLazyBailout(Node* node, BailoutId ast_id);
- void BuildLazyBailoutWithPushedNode(Node* node, BailoutId ast_id);
+ // Builds deoptimization for a given node.
+ void PrepareFrameState(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine combine = kIgnoreOutput);
+
+ OutputFrameStateCombine StateCombineFromAstContext();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
@@ -206,11 +210,9 @@ class AstGraphBuilder::Environment
DCHECK(variable->IsStackAllocated());
if (variable->IsParameter()) {
values()->at(variable->index() + 1) = node;
- parameters_dirty_ = true;
} else {
DCHECK(variable->IsStackLocal());
values()->at(variable->index() + parameters_count_) = node;
- locals_dirty_ = true;
}
}
Node* Lookup(Variable* variable) {
@@ -226,7 +228,6 @@ class AstGraphBuilder::Environment
// Operations on the operand stack.
void Push(Node* node) {
values()->push_back(node);
- stack_dirty_ = true;
}
Node* Top() {
DCHECK(stack_height() > 0);
@@ -236,7 +237,6 @@ class AstGraphBuilder::Environment
DCHECK(stack_height() > 0);
Node* back = values()->back();
values()->pop_back();
- stack_dirty_ = true;
return back;
}
@@ -245,7 +245,6 @@ class AstGraphBuilder::Environment
DCHECK(depth >= 0 && depth < stack_height());
int index = static_cast<int>(values()->size()) - depth - 1;
values()->at(index) = node;
- stack_dirty_ = true;
}
Node* Peek(int depth) {
DCHECK(depth >= 0 && depth < stack_height());
@@ -255,22 +254,26 @@ class AstGraphBuilder::Environment
void Drop(int depth) {
DCHECK(depth >= 0 && depth <= stack_height());
values()->erase(values()->end() - depth, values()->end());
- stack_dirty_ = true;
}
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId ast_id);
+ Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine);
+
+ protected:
+ AstGraphBuilder* builder() const {
+ return reinterpret_cast<AstGraphBuilder*>(
+ StructuredGraphBuilder::Environment::builder());
+ }
private:
+ void UpdateStateValues(Node** state_values, int offset, int count);
+
int parameters_count_;
int locals_count_;
Node* parameters_node_;
Node* locals_node_;
Node* stack_node_;
- bool parameters_dirty_;
- bool locals_dirty_;
- bool stack_dirty_;
};
@@ -282,10 +285,15 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
+ // Determines how to combine the frame state with the value
+ // that is about to be plugged into this AstContext.
+ OutputFrameStateCombine GetStateCombine() {
+ return IsEffect() ? kIgnoreOutput : kPushOutput;
+ }
+
// Plug a node into this expression context. Call this function in tail
// position in the Visit functions for expressions.
virtual void ProduceValue(Node* value) = 0;
- virtual void ProduceValueWithLazyBailout(Node* value) = 0;
// Unplugs a node from this expression context. Call this to retrieve the
// result of another Visit function that already plugged the context.
@@ -295,8 +303,7 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
void ReplaceValue() { ProduceValue(ConsumeValue()); }
protected:
- AstContext(AstGraphBuilder* owner, Expression::Context kind,
- BailoutId bailout_id);
+ AstContext(AstGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
AstGraphBuilder* owner() const { return owner_; }
@@ -308,8 +315,6 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
int original_height_;
#endif
- BailoutId bailout_id_;
-
private:
Expression::Context kind_;
AstGraphBuilder* owner_;
@@ -318,38 +323,35 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
// Context to evaluate expression for its side effects only.
-class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
+class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
public:
- explicit AstEffectContext(AstGraphBuilder* owner, BailoutId bailout_id)
- : AstContext(owner, Expression::kEffect, bailout_id) {}
+ explicit AstEffectContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kEffect) {}
virtual ~AstEffectContext();
- virtual void ProduceValue(Node* value) V8_OVERRIDE;
- virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
- virtual Node* ConsumeValue() V8_OVERRIDE;
+ virtual void ProduceValue(Node* value) OVERRIDE;
+ virtual Node* ConsumeValue() OVERRIDE;
};
// Context to evaluate expression for its value (and side effects).
-class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
+class AstGraphBuilder::AstValueContext FINAL : public AstContext {
public:
- explicit AstValueContext(AstGraphBuilder* owner, BailoutId bailout_id)
- : AstContext(owner, Expression::kValue, bailout_id) {}
+ explicit AstValueContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kValue) {}
virtual ~AstValueContext();
- virtual void ProduceValue(Node* value) V8_OVERRIDE;
- virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
- virtual Node* ConsumeValue() V8_OVERRIDE;
+ virtual void ProduceValue(Node* value) OVERRIDE;
+ virtual Node* ConsumeValue() OVERRIDE;
};
// Context to evaluate expression for a condition value (and side effects).
-class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
+class AstGraphBuilder::AstTestContext FINAL : public AstContext {
public:
- explicit AstTestContext(AstGraphBuilder* owner, BailoutId bailout_id)
- : AstContext(owner, Expression::kTest, bailout_id) {}
+ explicit AstTestContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kTest) {}
virtual ~AstTestContext();
- virtual void ProduceValue(Node* value) V8_OVERRIDE;
- virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
- virtual Node* ConsumeValue() V8_OVERRIDE;
+ virtual void ProduceValue(Node* value) OVERRIDE;
+ virtual Node* ConsumeValue() OVERRIDE;
};
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
new file mode 100644
index 0000000000..119a44b9c9
--- /dev/null
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -0,0 +1,103 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/schedule.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Find the first place to insert new nodes in a block that's already been
+// scheduled that won't upset the register allocator.
+static NodeVector::iterator FindInsertionPoint(NodeVector* nodes) {
+ NodeVector::iterator i = nodes->begin();
+ for (; i != nodes->end(); ++i) {
+ const Operator* op = (*i)->op();
+ if (OperatorProperties::IsBasicBlockBegin(op)) continue;
+ switch (op->opcode()) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi:
+ continue;
+ }
+ break;
+ }
+ return i;
+}
+
+
+// TODO(dcarney): need to mark code as non-serializable.
+static const Operator* PointerConstant(CommonOperatorBuilder* common,
+ void* ptr) {
+ return kPointerSize == 8
+ ? common->Int64Constant(reinterpret_cast<intptr_t>(ptr))
+ : common->Int32Constant(
+ static_cast<int32_t>(reinterpret_cast<intptr_t>(ptr)));
+}
+
+
+BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
+ CompilationInfo* info, Graph* graph, Schedule* schedule) {
+ // Skip the exit block in profiles, since the register allocator can't handle
+ // it and entry into it means falling off the end of the function anyway.
+ size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
+ BasicBlockProfiler::Data* data =
+ info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
+ // Set the function name.
+ if (!info->shared_info().is_null() &&
+ info->shared_info()->name()->IsString()) {
+ OStringStream os;
+ String::cast(info->shared_info()->name())->PrintUC16(os);
+ data->SetFunctionName(&os);
+ }
+ // Capture the schedule string before instrumentation.
+ {
+ OStringStream os;
+ os << *schedule;
+ data->SetSchedule(&os);
+ }
+ // Add the increment instructions to the start of every block.
+ CommonOperatorBuilder common(graph->zone());
+ Node* zero = graph->NewNode(common.Int32Constant(0));
+ Node* one = graph->NewNode(common.Int32Constant(1));
+ MachineOperatorBuilder machine;
+ BasicBlockVector* blocks = schedule->rpo_order();
+ size_t block_number = 0;
+ for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
+ ++it, ++block_number) {
+ BasicBlock* block = (*it);
+ data->SetBlockId(block_number, block->id());
+ // TODO(dcarney): wire effect and control deps for load and store.
+ // Construct increment operation.
+ Node* base = graph->NewNode(
+ PointerConstant(&common, data->GetCounterAddress(block_number)));
+ Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero);
+ Node* inc = graph->NewNode(machine.Int32Add(), load, one);
+ Node* store = graph->NewNode(
+ machine.Store(StoreRepresentation(kMachUint32, kNoWriteBarrier)), base,
+ zero, inc);
+ // Insert the new nodes.
+ static const int kArraySize = 6;
+ Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
+ int insertion_start = block_number == 0 ? 0 : 2;
+ NodeVector* nodes = &block->nodes_;
+ NodeVector::iterator insertion_point = FindInsertionPoint(nodes);
+ nodes->insert(insertion_point, &to_insert[insertion_start],
+ &to_insert[kArraySize]);
+ // Tell the scheduler about the new nodes.
+ for (int i = insertion_start; i < kArraySize; ++i) {
+ schedule->SetBlockForNode(block, to_insert[i]);
+ }
+ }
+ return data;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
new file mode 100644
index 0000000000..7edac0dbe8
--- /dev/null
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
+#define V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
+
+#include "src/v8.h"
+
+#include "src/basic-block-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationInfo;
+
+namespace compiler {
+
+class Graph;
+class Schedule;
+
+class BasicBlockInstrumentor : public AllStatic {
+ public:
+ static BasicBlockProfiler::Data* Instrument(CompilationInfo* info,
+ Graph* graph, Schedule* schedule);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/compiler/change-lowering-unittest.cc b/deps/v8/src/compiler/change-lowering-unittest.cc
new file mode 100644
index 0000000000..994027a83c
--- /dev/null
+++ b/deps/v8/src/compiler/change-lowering-unittest.cc
@@ -0,0 +1,476 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/compiler-test-utils.h"
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(bmeurer): Find a new home for these functions.
+inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
+ OStringStream ost;
+ ost << type;
+ return os << ost.c_str();
+}
+
+
+class ChangeLoweringTest : public GraphTest {
+ public:
+ ChangeLoweringTest() : simplified_(zone()) {}
+ virtual ~ChangeLoweringTest() {}
+
+ virtual MachineType WordRepresentation() const = 0;
+
+ protected:
+ int HeapNumberValueOffset() const {
+ STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
+ return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
+ kHeapObjectTag;
+ }
+ bool Is32() const { return WordRepresentation() == kRepWord32; }
+ int PointerSize() const {
+ switch (WordRepresentation()) {
+ case kRepWord32:
+ return 4;
+ case kRepWord64:
+ return 8;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+ }
+ int SmiMaxValue() const { return -(SmiMinValue() + 1); }
+ int SmiMinValue() const {
+ return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
+ }
+ int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
+ int SmiShiftSize() const {
+ return Is32() ? SmiTagging<4>::SmiShiftSize()
+ : SmiTagging<8>::SmiShiftSize();
+ }
+ int SmiValueSize() const {
+ return Is32() ? SmiTagging<4>::SmiValueSize()
+ : SmiTagging<8>::SmiValueSize();
+ }
+
+ Node* Parameter(int32_t index = 0) {
+ return graph()->NewNode(common()->Parameter(index), graph()->start());
+ }
+
+ Reduction Reduce(Node* node) {
+ Typer typer(zone());
+ MachineOperatorBuilder machine(WordRepresentation());
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
+ CompilationInfo info(isolate(), zone());
+ Linkage linkage(&info);
+ ChangeLowering reducer(&jsgraph, &linkage);
+ return reducer.Reduce(node);
+ }
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return IsCall(
+ _, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
+ CEntryStub(isolate(), 1).GetCode())),
+ IsExternalConstant(ExternalReference(
+ Runtime::FunctionForId(Runtime::kAllocateHeapNumber), isolate())),
+ IsInt32Constant(0), IsNumberConstant(0.0), effect_matcher,
+ control_matcher);
+ }
+ Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
+ : IsWord64Equal(lhs_matcher, rhs_matcher);
+ }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Common.
+
+
+class ChangeLoweringCommonTest
+ : public ChangeLoweringTest,
+ public ::testing::WithParamInterface<MachineType> {
+ public:
+ virtual ~ChangeLoweringCommonTest() {}
+
+ virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+ return GetParam();
+ }
+};
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch;
+ EXPECT_THAT(phi,
+ IsPhi(static_cast<MachineType>(kTypeBool | kRepTagged),
+ IsTrueConstant(), IsFalseConstant(),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(val, graph()->start()))),
+ IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* finish = reduction.replacement();
+ Capture<Node*> heap_number;
+ EXPECT_THAT(
+ finish,
+ IsFinish(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
+ IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+ IsInt32Constant(HeapNumberValueOffset()), val,
+ CaptureEq(&heap_number), graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
+ Node* node =
+ graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
+ Reduction reduction = Reduce(node);
+ EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
+ ::testing::Values(kRepWord32, kRepWord64));
+
+
+// -----------------------------------------------------------------------------
+// 32-bit
+
+
+class ChangeLowering32Test : public ChangeLoweringTest {
+ public:
+ virtual ~ChangeLowering32Test() {}
+ virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+ return kRepWord32;
+ }
+};
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> add, branch, heap_number, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachAnyTagged,
+ IsFinish(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_true))),
+ IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+ IsInt32Constant(HeapNumberValueOffset()),
+ IsChangeInt32ToFloat64(val), CaptureEq(&heap_number),
+ CaptureEq(&if_true))),
+ IsProjection(
+ 0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(CaptureEq(&branch),
+ IsBranch(IsProjection(1, CaptureEq(&add)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachFloat64,
+ IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+ IsControlEffect(CaptureEq(&if_true))),
+ IsChangeInt32ToFloat64(
+ IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
+ IsMerge(
+ AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachInt32,
+ IsChangeFloat64ToInt32(IsLoad(
+ kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+ IsControlEffect(CaptureEq(&if_true)))),
+ IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachUint32,
+ IsChangeFloat64ToUint32(IsLoad(
+ kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+ IsControlEffect(CaptureEq(&if_true)))),
+ IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, heap_number, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
+ IsFinish(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+ IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+ IsInt32Constant(HeapNumberValueOffset()),
+ IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
+ CaptureEq(&if_false))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ val, IsInt32Constant(SmiMaxValue())),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// 64-bit
+
+
+class ChangeLowering64Test : public ChangeLoweringTest {
+ public:
+ virtual ~ChangeLowering64Test() {}
+ virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+ return kRepWord64;
+ }
+};
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ EXPECT_THAT(reduction.replacement(),
+ IsWord64Shl(IsChangeInt32ToInt64(val),
+ IsInt32Constant(SmiShiftAmount())));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachFloat64,
+ IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+ IsControlEffect(CaptureEq(&if_true))),
+ IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
+ IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
+ IsMerge(
+ AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachInt32,
+ IsChangeFloat64ToInt32(IsLoad(
+ kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+ IsControlEffect(CaptureEq(&if_true)))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, if_true;
+ EXPECT_THAT(
+ phi,
+ IsPhi(kMachUint32,
+ IsChangeFloat64ToUint32(IsLoad(
+ kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+ IsControlEffect(CaptureEq(&if_true)))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Node* val = Parameter(0);
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+ Reduction reduction = Reduce(node);
+ ASSERT_TRUE(reduction.Changed());
+
+ Node* phi = reduction.replacement();
+ Capture<Node*> branch, heap_number, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
+ IsInt32Constant(SmiShiftAmount())),
+ IsFinish(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+ IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+ IsInt32Constant(HeapNumberValueOffset()),
+ IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
+ CaptureEq(&if_false))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ val, IsInt32Constant(SmiMaxValue())),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index 3f8e45b9e7..b13db4cee9 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -3,155 +3,140 @@
// found in the LICENSE file.
#include "src/compiler/change-lowering.h"
+#include "src/compiler/machine-operator.h"
-#include "src/compiler/common-node-cache.h"
-#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
namespace v8 {
namespace internal {
namespace compiler {
-ChangeLoweringBase::ChangeLoweringBase(Graph* graph, Linkage* linkage,
- CommonNodeCache* cache)
- : graph_(graph),
- isolate_(graph->zone()->isolate()),
- linkage_(linkage),
- cache_(cache),
- common_(graph->zone()),
- machine_(graph->zone()) {}
+ChangeLowering::~ChangeLowering() {}
-ChangeLoweringBase::~ChangeLoweringBase() {}
-
-
-Node* ChangeLoweringBase::ExternalConstant(ExternalReference reference) {
- Node** loc = cache()->FindExternalConstant(reference);
- if (*loc == NULL) {
- *loc = graph()->NewNode(common()->ExternalConstant(reference));
+Reduction ChangeLowering::Reduce(Node* node) {
+ Node* control = graph()->start();
+ switch (node->opcode()) {
+ case IrOpcode::kChangeBitToBool:
+ return ChangeBitToBool(node->InputAt(0), control);
+ case IrOpcode::kChangeBoolToBit:
+ return ChangeBoolToBit(node->InputAt(0));
+ case IrOpcode::kChangeFloat64ToTagged:
+ return ChangeFloat64ToTagged(node->InputAt(0), control);
+ case IrOpcode::kChangeInt32ToTagged:
+ return ChangeInt32ToTagged(node->InputAt(0), control);
+ case IrOpcode::kChangeTaggedToFloat64:
+ return ChangeTaggedToFloat64(node->InputAt(0), control);
+ case IrOpcode::kChangeTaggedToInt32:
+ return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
+ case IrOpcode::kChangeTaggedToUint32:
+ return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
+ case IrOpcode::kChangeUint32ToTagged:
+ return ChangeUint32ToTagged(node->InputAt(0), control);
+ default:
+ return NoChange();
}
- return *loc;
-}
-
-
-Node* ChangeLoweringBase::HeapConstant(PrintableUnique<HeapObject> value) {
- // TODO(bmeurer): Use common node cache.
- return graph()->NewNode(common()->HeapConstant(value));
+ UNREACHABLE();
+ return NoChange();
}
-Node* ChangeLoweringBase::ImmovableHeapConstant(Handle<HeapObject> value) {
- return HeapConstant(
- PrintableUnique<HeapObject>::CreateImmovable(graph()->zone(), value));
+Node* ChangeLowering::HeapNumberValueIndexConstant() {
+ STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
+ const int heap_number_value_offset =
+ ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
+ return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
}
-Node* ChangeLoweringBase::Int32Constant(int32_t value) {
- Node** loc = cache()->FindInt32Constant(value);
- if (*loc == NULL) {
- *loc = graph()->NewNode(common()->Int32Constant(value));
- }
- return *loc;
+Node* ChangeLowering::SmiMaxValueConstant() {
+ const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
+ : SmiTagging<8>::SmiValueSize();
+ return jsgraph()->Int32Constant(
+ -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
}
-Node* ChangeLoweringBase::NumberConstant(double value) {
- Node** loc = cache()->FindNumberConstant(value);
- if (*loc == NULL) {
- *loc = graph()->NewNode(common()->NumberConstant(value));
- }
- return *loc;
+Node* ChangeLowering::SmiShiftBitsConstant() {
+ const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
+ : SmiTagging<8>::SmiShiftSize();
+ return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
}
-Node* ChangeLoweringBase::CEntryStubConstant() {
- if (!c_entry_stub_constant_.is_set()) {
- c_entry_stub_constant_.set(
- ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
- }
- return c_entry_stub_constant_.get();
+Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
+ // The AllocateHeapNumber() runtime function does not use the context, so we
+ // can safely pass in Smi zero here.
+ Node* context = jsgraph()->ZeroConstant();
+ Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
+ const Runtime::Function* function =
+ Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
+ DCHECK_EQ(0, function->nargs);
+ CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
+ function->function_id, 0, Operator::kNoProperties);
+ Node* heap_number = graph()->NewNode(
+ common()->Call(desc), jsgraph()->CEntryStubConstant(),
+ jsgraph()->ExternalConstant(ExternalReference(function, isolate())),
+ jsgraph()->Int32Constant(function->nargs), context, effect, control);
+ Node* store = graph()->NewNode(
+ machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+ heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
+ return graph()->NewNode(common()->Finish(1), heap_number, store);
}
-Node* ChangeLoweringBase::TrueConstant() {
- if (!true_constant_.is_set()) {
- true_constant_.set(
- ImmovableHeapConstant(isolate()->factory()->true_value()));
+Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
+ value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
}
- return true_constant_.get();
+ return value;
}
-Node* ChangeLoweringBase::FalseConstant() {
- if (!false_constant_.is_set()) {
- false_constant_.set(
- ImmovableHeapConstant(isolate()->factory()->false_value()));
- }
- return false_constant_.get();
+Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
+ return graph()->NewNode(machine()->Load(kMachFloat64), value,
+ HeapNumberValueIndexConstant(),
+ graph()->NewNode(common()->ControlEffect(), control));
}
-Reduction ChangeLoweringBase::ChangeBitToBool(Node* val, Node* control) {
+Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
Node* branch = graph()->NewNode(common()->Branch(), val, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* true_value = TrueConstant();
+ Node* true_value = jsgraph()->TrueConstant();
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* false_value = FalseConstant();
+ Node* false_value = jsgraph()->FalseConstant();
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(2), true_value, false_value, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(static_cast<MachineType>(kTypeBool | kRepTagged), 2),
+ true_value, false_value, merge);
return Replace(phi);
}
-template <size_t kPointerSize>
-ChangeLowering<kPointerSize>::ChangeLowering(Graph* graph, Linkage* linkage)
- : ChangeLoweringBase(graph, linkage,
- new (graph->zone()) CommonNodeCache(graph->zone())) {}
-
-
-template <size_t kPointerSize>
-Reduction ChangeLowering<kPointerSize>::Reduce(Node* node) {
- Node* control = graph()->start();
- Node* effect = control;
- switch (node->opcode()) {
- case IrOpcode::kChangeBitToBool:
- return ChangeBitToBool(node->InputAt(0), control);
- case IrOpcode::kChangeBoolToBit:
- return ChangeBoolToBit(node->InputAt(0));
- case IrOpcode::kChangeInt32ToTagged:
- return ChangeInt32ToTagged(node->InputAt(0), effect, control);
- case IrOpcode::kChangeTaggedToFloat64:
- return ChangeTaggedToFloat64(node->InputAt(0), effect, control);
- default:
- return NoChange();
- }
- UNREACHABLE();
- return NoChange();
-}
-
-
-template <>
-Reduction ChangeLowering<4>::ChangeBoolToBit(Node* val) {
+Reduction ChangeLowering::ChangeBoolToBit(Node* val) {
return Replace(
- graph()->NewNode(machine()->Word32Equal(), val, TrueConstant()));
+ graph()->NewNode(machine()->WordEqual(), val, jsgraph()->TrueConstant()));
}
-template <>
-Reduction ChangeLowering<8>::ChangeBoolToBit(Node* val) {
- return Replace(
- graph()->NewNode(machine()->Word64Equal(), val, TrueConstant()));
+Reduction ChangeLowering::ChangeFloat64ToTagged(Node* val, Node* control) {
+ return Replace(AllocateHeapNumberWithValue(val, control));
}
-template <>
-Reduction ChangeLowering<4>::ChangeInt32ToTagged(Node* val, Node* effect,
- Node* control) {
- Node* context = NumberConstant(0);
+Reduction ChangeLowering::ChangeInt32ToTagged(Node* val, Node* control) {
+ if (machine()->Is64()) {
+ return Replace(
+ graph()->NewNode(machine()->Word64Shl(),
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), val),
+ SmiShiftBitsConstant()));
+ }
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
Node* ovf = graph()->NewNode(common()->Projection(1), add);
@@ -159,101 +144,112 @@ Reduction ChangeLowering<4>::ChangeInt32ToTagged(Node* val, Node* effect,
Node* branch = graph()->NewNode(common()->Branch(), ovf, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(), val);
-
- // TODO(bmeurer): Inline allocation if possible.
- const Runtime::Function* fn =
- Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
- DCHECK_EQ(0, fn->nargs);
- CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
- fn->function_id, 0, Operator::kNoProperties);
- Node* heap_number =
- graph()->NewNode(common()->Call(desc), CEntryStubConstant(),
- ExternalConstant(ExternalReference(fn, isolate())),
- Int32Constant(0), context, effect, if_true);
-
- Node* store = graph()->NewNode(
- machine()->Store(kMachineFloat64, kNoWriteBarrier), heap_number,
- Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), number, effect,
- heap_number);
+ Node* heap_number = AllocateHeapNumberWithValue(
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), val), if_true);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* smi = graph()->NewNode(common()->Projection(0), add);
- Node* merge = graph()->NewNode(common()->Merge(2), store, if_false);
- Node* phi = graph()->NewNode(common()->Phi(2), heap_number, smi, merge);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), heap_number,
+ smi, merge);
return Replace(phi);
}
-template <>
-Reduction ChangeLowering<8>::ChangeInt32ToTagged(Node* val, Node* effect,
- Node* control) {
- return Replace(graph()->NewNode(
- machine()->Word64Shl(), val,
- Int32Constant(SmiTagging<8>::kSmiShiftSize + kSmiTagSize)));
+Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control,
+ Signedness signedness) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ Node* tag = graph()->NewNode(machine()->WordAnd(), val,
+ jsgraph()->Int32Constant(kSmiTagMask));
+ Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ const Operator* op = (signedness == kSigned)
+ ? machine()->ChangeFloat64ToInt32()
+ : machine()->ChangeFloat64ToUint32();
+ Node* change = graph()->NewNode(op, LoadHeapNumberValue(val, if_true));
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* number = ChangeSmiToInt32(val);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi((signedness == kSigned) ? kMachInt32 : kMachUint32, 2),
+ change, number, merge);
+
+ return Replace(phi);
}
-template <>
-Reduction ChangeLowering<4>::ChangeTaggedToFloat64(Node* val, Node* effect,
- Node* control) {
- Node* branch = graph()->NewNode(
- common()->Branch(),
- graph()->NewNode(machine()->Word32And(), val, Int32Constant(kSmiTagMask)),
- control);
+Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ Node* tag = graph()->NewNode(machine()->WordAnd(), val,
+ jsgraph()->Int32Constant(kSmiTagMask));
+ Node* branch = graph()->NewNode(common()->Branch(), tag, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* load = graph()->NewNode(
- machine()->Load(kMachineFloat64), val,
- Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), if_true);
+ Node* load = LoadHeapNumberValue(val, if_true);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* number = graph()->NewNode(
- machine()->ChangeInt32ToFloat64(),
- graph()->NewNode(
- machine()->Word32Sar(), val,
- Int32Constant(SmiTagging<4>::kSmiShiftSize + kSmiTagSize)));
+ Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(),
+ ChangeSmiToInt32(val));
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(2), load, number, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachFloat64, 2), load, number, merge);
return Replace(phi);
}
-template <>
-Reduction ChangeLowering<8>::ChangeTaggedToFloat64(Node* val, Node* effect,
- Node* control) {
- Node* branch = graph()->NewNode(
- common()->Branch(),
- graph()->NewNode(machine()->Word64And(), val, Int32Constant(kSmiTagMask)),
- control);
+Reduction ChangeLowering::ChangeUint32ToTagged(Node* val, Node* control) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ Node* cmp = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
+ SmiMaxValueConstant());
+ Node* branch = graph()->NewNode(common()->Branch(), cmp, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* load = graph()->NewNode(
- machine()->Load(kMachineFloat64), val,
- Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), if_true);
+ Node* smi = graph()->NewNode(
+ machine()->WordShl(),
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeUint32ToUint64(), val)
+ : val,
+ SmiShiftBitsConstant());
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* number = graph()->NewNode(
- machine()->ChangeInt32ToFloat64(),
- graph()->NewNode(
- machine()->ConvertInt64ToInt32(),
- graph()->NewNode(
- machine()->Word64Sar(), val,
- Int32Constant(SmiTagging<8>::kSmiShiftSize + kSmiTagSize))));
+ Node* heap_number = AllocateHeapNumberWithValue(
+ graph()->NewNode(machine()->ChangeUint32ToFloat64(), val), if_false);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(2), load, number, merge);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), smi,
+ heap_number, merge);
return Replace(phi);
}
-template class ChangeLowering<4>;
-template class ChangeLowering<8>;
+Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
+
+
+Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* ChangeLowering::common() const {
+ return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* ChangeLowering::machine() const {
+ return jsgraph()->machine();
+}
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
index 3e16d800de..5d7ab41b83 100644
--- a/deps/v8/src/compiler/change-lowering.h
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -5,71 +5,52 @@
#ifndef V8_COMPILER_CHANGE_LOWERING_H_
#define V8_COMPILER_CHANGE_LOWERING_H_
-#include "include/v8.h"
-#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/machine-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
-class CommonNodeCache;
+class CommonOperatorBuilder;
+class JSGraph;
class Linkage;
+class MachineOperatorBuilder;
-class ChangeLoweringBase : public Reducer {
+class ChangeLowering FINAL : public Reducer {
public:
- ChangeLoweringBase(Graph* graph, Linkage* linkage, CommonNodeCache* cache);
- virtual ~ChangeLoweringBase();
+ ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
+ : jsgraph_(jsgraph), linkage_(linkage) {}
+ virtual ~ChangeLowering();
- protected:
- Node* ExternalConstant(ExternalReference reference);
- Node* HeapConstant(PrintableUnique<HeapObject> value);
- Node* ImmovableHeapConstant(Handle<HeapObject> value);
- Node* Int32Constant(int32_t value);
- Node* NumberConstant(double value);
- Node* CEntryStubConstant();
- Node* TrueConstant();
- Node* FalseConstant();
-
- Reduction ChangeBitToBool(Node* val, Node* control);
-
- Graph* graph() const { return graph_; }
- Isolate* isolate() const { return isolate_; }
- Linkage* linkage() const { return linkage_; }
- CommonNodeCache* cache() const { return cache_; }
- CommonOperatorBuilder* common() { return &common_; }
- MachineOperatorBuilder* machine() { return &machine_; }
+ virtual Reduction Reduce(Node* node) OVERRIDE;
private:
- Graph* graph_;
- Isolate* isolate_;
- Linkage* linkage_;
- CommonNodeCache* cache_;
- CommonOperatorBuilder common_;
- MachineOperatorBuilder machine_;
-
- SetOncePointer<Node> c_entry_stub_constant_;
- SetOncePointer<Node> true_constant_;
- SetOncePointer<Node> false_constant_;
-};
+ Node* HeapNumberValueIndexConstant();
+ Node* SmiMaxValueConstant();
+ Node* SmiShiftBitsConstant();
+ Node* AllocateHeapNumberWithValue(Node* value, Node* control);
+ Node* ChangeSmiToInt32(Node* value);
+ Node* LoadHeapNumberValue(Node* value, Node* control);
-template <size_t kPointerSize = kApiPointerSize>
-class ChangeLowering V8_FINAL : public ChangeLoweringBase {
- public:
- ChangeLowering(Graph* graph, Linkage* linkage);
- ChangeLowering(Graph* graph, Linkage* linkage, CommonNodeCache* cache)
- : ChangeLoweringBase(graph, linkage, cache) {}
- virtual ~ChangeLowering() {}
-
- virtual Reduction Reduce(Node* node) V8_OVERRIDE;
-
- private:
+ Reduction ChangeBitToBool(Node* val, Node* control);
Reduction ChangeBoolToBit(Node* val);
- Reduction ChangeInt32ToTagged(Node* val, Node* effect, Node* control);
- Reduction ChangeTaggedToFloat64(Node* val, Node* effect, Node* control);
+ Reduction ChangeFloat64ToTagged(Node* val, Node* control);
+ Reduction ChangeInt32ToTagged(Node* val, Node* control);
+ Reduction ChangeTaggedToFloat64(Node* val, Node* control);
+ Reduction ChangeTaggedToUI32(Node* val, Node* control, Signedness signedness);
+ Reduction ChangeUint32ToTagged(Node* val, Node* control);
+
+ Graph* graph() const;
+ Isolate* isolate() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Linkage* linkage() const { return linkage_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+
+ JSGraph* jsgraph_;
+ Linkage* linkage_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 878ace3be1..f22c479780 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -19,14 +19,10 @@ CodeGenerator::CodeGenerator(InstructionSequence* code)
masm_(code->zone()->isolate(), NULL, 0),
resolver_(this),
safepoints_(code->zone()),
- lazy_deoptimization_entries_(
- LazyDeoptimizationEntries::allocator_type(code->zone())),
- deoptimization_states_(
- DeoptimizationStates::allocator_type(code->zone())),
- deoptimization_literals_(Literals::allocator_type(code->zone())),
- translations_(code->zone()) {
- deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL);
-}
+ deoptimization_states_(code->zone()),
+ deoptimization_literals_(code->zone()),
+ translations_(code->zone()),
+ last_lazy_deopt_pc_(0) {}
Handle<Code> CodeGenerator::GenerateCode() {
@@ -53,6 +49,14 @@ Handle<Code> CodeGenerator::GenerateCode() {
FinishCode(masm());
+ // Ensure there is space for lazy deopt.
+ if (!info->IsStub()) {
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
+
safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
// TODO(titzer): what are the right code flags here?
@@ -174,11 +178,10 @@ void CodeGenerator::AssembleGap(GapInstruction* instr) {
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
CompilationInfo* info = linkage()->info();
- int deopt_count = code()->GetDeoptimizationEntryCount();
- int patch_count = static_cast<int>(lazy_deoptimization_entries_.size());
- if (patch_count == 0 && deopt_count == 0) return;
- Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
- isolate(), deopt_count, patch_count, TENURED);
+ int deopt_count = static_cast<int>(deoptimization_states_.size());
+ if (deopt_count == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), deopt_count, TENURED);
Handle<ByteArray> translation_array =
translations_.CreateByteArray(isolate()->factory());
@@ -213,46 +216,65 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
// Populate deoptimization entries.
for (int i = 0; i < deopt_count; i++) {
- FrameStateDescriptor* descriptor = code()->GetDeoptimizationEntry(i);
- data->SetAstId(i, descriptor->bailout_id());
+ DeoptimizationState* deoptimization_state = deoptimization_states_[i];
+ data->SetAstId(i, deoptimization_state->bailout_id());
CHECK_NE(NULL, deoptimization_states_[i]);
data->SetTranslationIndex(
- i, Smi::FromInt(deoptimization_states_[i]->translation_id_));
+ i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
data->SetArgumentsStackHeight(i, Smi::FromInt(0));
- data->SetPc(i, Smi::FromInt(-1));
- }
-
- // Populate the return address patcher entries.
- for (int i = 0; i < patch_count; ++i) {
- LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i];
- DCHECK(entry.position_after_call() == entry.continuation()->pos() ||
- IsNopForSmiCodeInlining(code_object, entry.position_after_call(),
- entry.continuation()->pos()));
- data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call()));
- data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos()));
+ data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
}
code_object->set_deoptimization_data(*data);
}
-void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
- InstructionOperandConverter i(this, instr);
+void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
+ CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
- Label after_call;
- masm()->bind(&after_call);
+ bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
- // The continuation and deoptimization are the last two inputs:
- BasicBlock* cont_block =
- i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
- BasicBlock* deopt_block =
- i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
+ RecordSafepoint(
+ instr->pointer_map(), Safepoint::kSimple, 0,
+ needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
- Label* cont_label = code_->GetLabel(cont_block);
- Label* deopt_label = code_->GetLabel(deopt_block);
+ if (flags & CallDescriptor::kNeedsNopAfterCall) {
+ AddNopForSmiCodeInlining();
+ }
- lazy_deoptimization_entries_.push_back(
- LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label));
+ if (needs_frame_state) {
+ MarkLazyDeoptSite();
+ // If the frame state is present, it starts at argument 1
+ // (just after the code address).
+ InstructionOperandConverter converter(this, instr);
+ // Deoptimization info starts at argument 1
+ size_t frame_state_offset = 1;
+ FrameStateDescriptor* descriptor =
+ GetFrameStateDescriptor(instr, frame_state_offset);
+ int pc_offset = masm()->pc_offset();
+ int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
+ descriptor->state_combine());
+ // If the pre-call frame state differs from the post-call one, produce the
+ // pre-call frame state, too.
+ // TODO(jarin) We might want to avoid building the pre-call frame state
+ // because it is only used to get locals and arguments (by the debugger and
+ // f.arguments), and those are the same in the pre-call and post-call
+ // states.
+ if (descriptor->state_combine() != kIgnoreOutput) {
+ deopt_state_id =
+ BuildTranslation(instr, -1, frame_state_offset, kIgnoreOutput);
+ }
+#if DEBUG
+ // Make sure all the values live in stack slots or they are immediates.
+ // (The values should not live in register because registers are clobbered
+ // by calls.)
+ for (size_t i = 0; i < descriptor->size(); i++) {
+ InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
+ CHECK(op->IsStackSlot() || op->IsImmediate());
+ }
+#endif
+ safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
+ }
}
@@ -266,24 +288,81 @@ int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
}
-void CodeGenerator::BuildTranslation(Instruction* instr,
- int deoptimization_id) {
- // We should build translation only once.
- DCHECK_EQ(NULL, deoptimization_states_[deoptimization_id]);
+FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
+ Instruction* instr, size_t frame_state_offset) {
+ InstructionOperandConverter i(this, instr);
+ InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
+ i.InputInt32(static_cast<int>(frame_state_offset)));
+ return code()->GetFrameStateDescriptor(state_id);
+}
+
- FrameStateDescriptor* descriptor =
- code()->GetDeoptimizationEntry(deoptimization_id);
- Translation translation(&translations_, 1, 1, zone());
- translation.BeginJSFrame(descriptor->bailout_id(),
- Translation::kSelfLiteralId,
- descriptor->size() - descriptor->parameters_count());
-
- for (int i = 0; i < descriptor->size(); i++) {
- AddTranslationForOperand(&translation, instr, instr->InputAt(i));
+void CodeGenerator::BuildTranslationForFrameStateDescriptor(
+ FrameStateDescriptor* descriptor, Instruction* instr,
+ Translation* translation, size_t frame_state_offset,
+ OutputFrameStateCombine state_combine) {
+ // Outer-most state must be added to translation first.
+ if (descriptor->outer_state() != NULL) {
+ BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
+ translation, frame_state_offset,
+ kIgnoreOutput);
+ }
+
+ int id = Translation::kSelfLiteralId;
+ if (!descriptor->jsfunction().is_null()) {
+ id = DefineDeoptimizationLiteral(
+ Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
+ }
+
+ switch (descriptor->type()) {
+ case JS_FRAME:
+ translation->BeginJSFrame(
+ descriptor->bailout_id(), id,
+ static_cast<unsigned int>(descriptor->GetHeight(state_combine)));
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(
+ id, static_cast<unsigned int>(descriptor->parameters_count()));
+ break;
+ }
+
+ frame_state_offset += descriptor->outer_state()->GetTotalSize();
+ for (size_t i = 0; i < descriptor->size(); i++) {
+ AddTranslationForOperand(
+ translation, instr,
+ instr->InputAt(static_cast<int>(frame_state_offset + i)));
}
- deoptimization_states_[deoptimization_id] =
- new (zone()) DeoptimizationState(translation.index());
+ switch (state_combine) {
+ case kPushOutput:
+ DCHECK(instr->OutputCount() == 1);
+ AddTranslationForOperand(translation, instr, instr->OutputAt(0));
+ break;
+ case kIgnoreOutput:
+ break;
+ }
+}
+
+
+int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
+ size_t frame_state_offset,
+ OutputFrameStateCombine state_combine) {
+ FrameStateDescriptor* descriptor =
+ GetFrameStateDescriptor(instr, frame_state_offset);
+ frame_state_offset++;
+
+ Translation translation(
+ &translations_, static_cast<int>(descriptor->GetFrameCount()),
+ static_cast<int>(descriptor->GetJSFrameCount()), zone());
+ BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
+ frame_state_offset, state_combine);
+
+ int deoptimization_id = static_cast<int>(deoptimization_states_.size());
+
+ deoptimization_states_.push_back(new (zone()) DeoptimizationState(
+ descriptor->bailout_id(), translation.index(), pc_offset));
+
+ return deoptimization_id;
}
@@ -310,8 +389,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
case Constant::kFloat64:
- constant_object =
- isolate()->factory()->NewHeapNumber(constant.ToFloat64());
+ constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
constant_object = constant.ToHeapObject();
@@ -326,6 +404,11 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
}
}
+
+void CodeGenerator::MarkLazyDeoptSite() {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
#if !V8_TURBOFAN_BACKEND
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -345,6 +428,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ UNIMPLEMENTED();
+}
+
+
void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
@@ -365,15 +453,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
-
-#ifdef DEBUG
-bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc) {
- UNIMPLEMENTED();
- return false;
-}
-#endif
-
#endif // !V8_TURBOFAN_BACKEND
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index b603c555c3..ddc2f9adb6 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -18,7 +18,7 @@ namespace internal {
namespace compiler {
// Generates native code for a sequence of instructions.
-class CodeGenerator V8_FINAL : public GapResolver::Assembler {
+class CodeGenerator FINAL : public GapResolver::Assembler {
public:
explicit CodeGenerator(InstructionSequence* code);
@@ -62,6 +62,8 @@ class CodeGenerator V8_FINAL : public GapResolver::Assembler {
void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleDeoptimizerCall(int deoptimization_id);
+
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssemblePrologue();
@@ -75,68 +77,58 @@ class CodeGenerator V8_FINAL : public GapResolver::Assembler {
// Interface used by the gap resolver to emit moves and swaps.
virtual void AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) V8_OVERRIDE;
+ InstructionOperand* destination) OVERRIDE;
virtual void AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) V8_OVERRIDE;
+ InstructionOperand* destination) OVERRIDE;
// ===========================================================================
// Deoptimization table construction
- void RecordLazyDeoptimizationEntry(Instruction* instr);
+ void AddSafepointAndDeopt(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- void BuildTranslation(Instruction* instr, int deoptimization_id);
+ FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
+ size_t frame_state_offset);
+ int BuildTranslation(Instruction* instr, int pc_offset,
+ size_t frame_state_offset,
+ OutputFrameStateCombine state_combine);
+ void BuildTranslationForFrameStateDescriptor(
+ FrameStateDescriptor* descriptor, Instruction* instr,
+ Translation* translation, size_t frame_state_offset,
+ OutputFrameStateCombine state_combine);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op);
void AddNopForSmiCodeInlining();
-#if DEBUG
- static bool IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc);
-#endif // DEBUG
- // ===========================================================================
+ void EnsureSpaceForLazyDeopt();
+ void MarkLazyDeoptSite();
- class LazyDeoptimizationEntry V8_FINAL {
+ // ===========================================================================
+ struct DeoptimizationState : ZoneObject {
public:
- LazyDeoptimizationEntry(int position_after_call, Label* continuation,
- Label* deoptimization)
- : position_after_call_(position_after_call),
- continuation_(continuation),
- deoptimization_(deoptimization) {}
+ BailoutId bailout_id() const { return bailout_id_; }
+ int translation_id() const { return translation_id_; }
+ int pc_offset() const { return pc_offset_; }
- int position_after_call() const { return position_after_call_; }
- Label* continuation() const { return continuation_; }
- Label* deoptimization() const { return deoptimization_; }
+ DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset)
+ : bailout_id_(bailout_id),
+ translation_id_(translation_id),
+ pc_offset_(pc_offset) {}
private:
- int position_after_call_;
- Label* continuation_;
- Label* deoptimization_;
- };
-
- struct DeoptimizationState : ZoneObject {
+ BailoutId bailout_id_;
int translation_id_;
-
- explicit DeoptimizationState(int translation_id)
- : translation_id_(translation_id) {}
+ int pc_offset_;
};
- typedef std::deque<LazyDeoptimizationEntry,
- zone_allocator<LazyDeoptimizationEntry> >
- LazyDeoptimizationEntries;
- typedef std::deque<DeoptimizationState*,
- zone_allocator<DeoptimizationState*> >
- DeoptimizationStates;
- typedef std::deque<Handle<Object>, zone_allocator<Handle<Object> > > Literals;
-
InstructionSequence* code_;
BasicBlock* current_block_;
SourcePosition current_source_position_;
MacroAssembler masm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
- LazyDeoptimizationEntries lazy_deoptimization_entries_;
- DeoptimizationStates deoptimization_states_;
- Literals deoptimization_literals_;
+ ZoneDeque<DeoptimizationState*> deoptimization_states_;
+ ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
+ int last_lazy_deopt_pc_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index 2b0ac0b6e2..1ed2b0497d 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -13,7 +13,7 @@ namespace internal {
namespace compiler {
// Bundles various caches for common nodes.
-class CommonNodeCache V8_FINAL : public ZoneObject {
+class CommonNodeCache FINAL : public ZoneObject {
public:
explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
@@ -23,7 +23,7 @@ class CommonNodeCache V8_FINAL : public ZoneObject {
Node** FindFloat64Constant(double value) {
// We canonicalize double constants at the bit representation level.
- return float64_constants_.Find(zone_, BitCast<int64_t>(value));
+ return float64_constants_.Find(zone_, bit_cast<int64_t>(value));
}
Node** FindExternalConstant(ExternalReference reference) {
@@ -32,7 +32,7 @@ class CommonNodeCache V8_FINAL : public ZoneObject {
Node** FindNumberConstant(double value) {
// We canonicalize double constants at the bit representation level.
- return number_constants_.Find(zone_, BitCast<int64_t>(value));
+ return number_constants_.Find(zone_, bit_cast<int64_t>(value));
}
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/common-operator-unittest.cc b/deps/v8/src/compiler/common-operator-unittest.cc
new file mode 100644
index 0000000000..5001770ddf
--- /dev/null
+++ b/deps/v8/src/compiler/common-operator-unittest.cc
@@ -0,0 +1,183 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include <limits>
+
+#include "src/compiler/operator-properties-inl.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// -----------------------------------------------------------------------------
+// Shared operators.
+
+
+namespace {
+
+struct SharedOperator {
+ const Operator* (CommonOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ Operator::Properties properties;
+ int value_input_count;
+ int effect_input_count;
+ int control_input_count;
+ int effect_output_count;
+ int control_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
+ return os << IrOpcode::Mnemonic(fop.opcode);
+}
+
+
+const SharedOperator kSharedOperators[] = {
+#define SHARED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, effect_output_count, control_output_count) \
+ { \
+ &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties, \
+ value_input_count, effect_input_count, control_input_count, \
+ effect_output_count, control_output_count \
+ }
+ SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
+ SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
+ SHARED(Branch, Operator::kFoldable, 1, 0, 1, 0, 2),
+ SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
+ SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
+ SHARED(Throw, Operator::kFoldable, 1, 0, 1, 0, 1),
+ SHARED(Return, Operator::kNoProperties, 1, 1, 1, 1, 1),
+ SHARED(ControlEffect, Operator::kPure, 0, 0, 1, 1, 0)
+#undef SHARED
+};
+
+
+class CommonSharedOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<SharedOperator> {};
+
+} // namespace
+
+
+TEST_P(CommonSharedOperatorTest, InstancesAreGloballyShared) {
+ const SharedOperator& sop = GetParam();
+ CommonOperatorBuilder common1(zone());
+ CommonOperatorBuilder common2(zone());
+ EXPECT_EQ((common1.*sop.constructor)(), (common2.*sop.constructor)());
+}
+
+
+TEST_P(CommonSharedOperatorTest, NumberOfInputsAndOutputs) {
+ CommonOperatorBuilder common(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (common.*sop.constructor)();
+
+ EXPECT_EQ(sop.value_input_count, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(sop.effect_input_count,
+ OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(sop.control_input_count,
+ OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(
+ sop.value_input_count + sop.effect_input_count + sop.control_input_count,
+ OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(sop.effect_output_count,
+ OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(sop.control_output_count,
+ OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(CommonSharedOperatorTest, OpcodeIsCorrect) {
+ CommonOperatorBuilder common(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (common.*sop.constructor)();
+ EXPECT_EQ(sop.opcode, op->opcode());
+}
+
+
+TEST_P(CommonSharedOperatorTest, Properties) {
+ CommonOperatorBuilder common(zone());
+ const SharedOperator& sop = GetParam();
+ const Operator* op = (common.*sop.constructor)();
+ EXPECT_EQ(sop.properties, op->properties());
+}
+
+
+INSTANTIATE_TEST_CASE_P(CommonOperatorTest, CommonSharedOperatorTest,
+ ::testing::ValuesIn(kSharedOperators));
+
+
+// -----------------------------------------------------------------------------
+// Other operators.
+
+
+namespace {
+
+class CommonOperatorTest : public TestWithZone {
+ public:
+ CommonOperatorTest() : common_(zone()) {}
+ virtual ~CommonOperatorTest() {}
+
+ CommonOperatorBuilder* common() { return &common_; }
+
+ private:
+ CommonOperatorBuilder common_;
+};
+
+
+const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};
+
+const float kFloat32Values[] = {
+ std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
+ std::numeric_limits<float>::max()};
+
+} // namespace
+
+
+TEST_F(CommonOperatorTest, Float32Constant) {
+ TRACED_FOREACH(float, value, kFloat32Values) {
+ const Operator* op = common()->Float32Constant(value);
+ EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
+ EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+ }
+}
+
+
+TEST_F(CommonOperatorTest, ValueEffect) {
+ TRACED_FOREACH(int, arguments, kArguments) {
+ const Operator* op = common()->ValueEffect(arguments);
+ EXPECT_EQ(arguments, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(arguments, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Finish) {
+ TRACED_FOREACH(int, arguments, kArguments) {
+ const Operator* op = common()->Finish(arguments);
+ EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(arguments, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(arguments + 1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
new file mode 100644
index 0000000000..19792bd1da
--- /dev/null
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -0,0 +1,252 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include "src/assembler.h"
+#include "src/base/lazy-instance.h"
+#include "src/compiler/linkage.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// TODO(turbofan): Use size_t instead of int here.
+class ControlOperator : public Operator1<int> {
+ public:
+ ControlOperator(IrOpcode::Value opcode, Properties properties, int inputs,
+ int outputs, int controls, const char* mnemonic)
+ : Operator1<int>(opcode, properties, inputs, outputs, mnemonic,
+ controls) {}
+
+ virtual OStream& PrintParameter(OStream& os) const FINAL { return os; }
+};
+
+} // namespace
+
+
+// Specialization for static parameters of type {ExternalReference}.
+template <>
+struct StaticParameterTraits<ExternalReference> {
+ static OStream& PrintTo(OStream& os, ExternalReference reference) {
+ os << reference.address();
+ // TODO(bmeurer): Move to operator<<(os, ExternalReference)
+ const Runtime::Function* function =
+ Runtime::FunctionForEntry(reference.address());
+ if (function) {
+ os << " <" << function->name << ".entry>";
+ }
+ return os;
+ }
+ static int HashCode(ExternalReference reference) {
+ return bit_cast<int>(static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(reference.address())));
+ }
+ static bool Equals(ExternalReference lhs, ExternalReference rhs) {
+ return lhs == rhs;
+ }
+};
+
+
+#define SHARED_OP_LIST(V) \
+ V(Dead, Operator::kFoldable, 0, 0) \
+ V(End, Operator::kFoldable, 0, 1) \
+ V(Branch, Operator::kFoldable, 1, 1) \
+ V(IfTrue, Operator::kFoldable, 0, 1) \
+ V(IfFalse, Operator::kFoldable, 0, 1) \
+ V(Throw, Operator::kFoldable, 1, 1) \
+ V(Return, Operator::kNoProperties, 1, 1)
+
+
+struct CommonOperatorBuilderImpl FINAL {
+#define SHARED(Name, properties, value_input_count, control_input_count) \
+ struct Name##Operator FINAL : public ControlOperator { \
+ Name##Operator() \
+ : ControlOperator(IrOpcode::k##Name, properties, value_input_count, 0, \
+ control_input_count, #Name) {} \
+ }; \
+ Name##Operator k##Name##Operator;
+ SHARED_OP_LIST(SHARED)
+#undef SHARED
+
+ struct ControlEffectOperator FINAL : public SimpleOperator {
+ ControlEffectOperator()
+ : SimpleOperator(IrOpcode::kControlEffect, Operator::kPure, 0, 0,
+ "ControlEffect") {}
+ };
+ ControlEffectOperator kControlEffectOperator;
+};
+
+
+static base::LazyInstance<CommonOperatorBuilderImpl>::type kImpl =
+ LAZY_INSTANCE_INITIALIZER;
+
+
+CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
+ : impl_(kImpl.Get()), zone_(zone) {}
+
+
+#define SHARED(Name, properties, value_input_count, control_input_count) \
+ const Operator* CommonOperatorBuilder::Name() { \
+ return &impl_.k##Name##Operator; \
+ }
+SHARED_OP_LIST(SHARED)
+#undef SHARED
+
+
+const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
+ // Outputs are formal parameters, plus context, receiver, and JSFunction.
+ const int value_output_count = num_formal_parameters + 3;
+ return new (zone()) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0,
+ value_output_count, 0, "Start");
+}
+
+
+const Operator* CommonOperatorBuilder::Merge(int controls) {
+ return new (zone()) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
+ 0, controls, "Merge");
+}
+
+
+const Operator* CommonOperatorBuilder::Loop(int controls) {
+ return new (zone()) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
+ 0, controls, "Loop");
+}
+
+
+const Operator* CommonOperatorBuilder::Parameter(int index) {
+ return new (zone()) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1,
+ 1, "Parameter", index);
+}
+
+
+const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
+ return new (zone()) Operator1<int32_t>(
+ IrOpcode::kInt32Constant, Operator::kPure, 0, 1, "Int32Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
+ return new (zone()) Operator1<int64_t>(
+ IrOpcode::kInt64Constant, Operator::kPure, 0, 1, "Int64Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
+ return new (zone())
+ Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
+ "Float32Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
+ return new (zone())
+ Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
+ "Float64Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::ExternalConstant(
+ const ExternalReference& value) {
+ return new (zone())
+ Operator1<ExternalReference>(IrOpcode::kExternalConstant, Operator::kPure,
+ 0, 1, "ExternalConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
+ return new (zone())
+ Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
+ "NumberConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::HeapConstant(
+ const Unique<Object>& value) {
+ return new (zone()) Operator1<Unique<Object> >(
+ IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Phi(MachineType type, int arguments) {
+ DCHECK(arguments > 0); // Disallow empty phis.
+ return new (zone()) Operator1<MachineType>(IrOpcode::kPhi, Operator::kPure,
+ arguments, 1, "Phi", type);
+}
+
+
+const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
+ DCHECK(arguments > 0); // Disallow empty phis.
+ return new (zone()) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
+ 0, "EffectPhi", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::ControlEffect() {
+ return &impl_.kControlEffectOperator;
+}
+
+
+const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
+ DCHECK(arguments > 0); // Disallow empty value effects.
+ return new (zone()) SimpleOperator(IrOpcode::kValueEffect, Operator::kPure,
+ arguments, 0, "ValueEffect");
+}
+
+
+const Operator* CommonOperatorBuilder::Finish(int arguments) {
+ DCHECK(arguments > 0); // Disallow empty finishes.
+ return new (zone()) Operator1<int>(IrOpcode::kFinish, Operator::kPure, 1, 1,
+ "Finish", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::StateValues(int arguments) {
+ return new (zone()) Operator1<int>(IrOpcode::kStateValues, Operator::kPure,
+ arguments, 1, "StateValues", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::FrameState(
+ FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
+ return new (zone()) Operator1<FrameStateCallInfo>(
+ IrOpcode::kFrameState, Operator::kPure, 4, 1, "FrameState",
+ FrameStateCallInfo(type, bailout_id, state_combine, jsfunction));
+}
+
+
+const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
+ class CallOperator FINAL : public Operator1<const CallDescriptor*> {
+ public:
+ // TODO(titzer): Operator still uses int, whereas CallDescriptor uses
+ // size_t.
+ CallOperator(const CallDescriptor* descriptor, const char* mnemonic)
+ : Operator1<const CallDescriptor*>(
+ IrOpcode::kCall, descriptor->properties(),
+ static_cast<int>(descriptor->InputCount() +
+ descriptor->FrameStateCount()),
+ static_cast<int>(descriptor->ReturnCount()), mnemonic,
+ descriptor) {}
+
+ virtual OStream& PrintParameter(OStream& os) const OVERRIDE {
+ return os << "[" << *parameter() << "]";
+ }
+ };
+ return new (zone()) CallOperator(descriptor, "Call");
+}
+
+
+const Operator* CommonOperatorBuilder::Projection(size_t index) {
+ return new (zone()) Operator1<size_t>(IrOpcode::kProjection, Operator::kPure,
+ 1, 1, "Projection", index);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 3b581ae0cd..a3659adfc2 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -5,280 +5,113 @@
#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
+#include "src/compiler/machine-type.h"
#include "src/unique.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ExternalReference;
class OStream;
-namespace compiler {
-
-class ControlOperator : public Operator1<int> {
- public:
- ControlOperator(IrOpcode::Value opcode, uint16_t properties, int inputs,
- int outputs, int controls, const char* mnemonic)
- : Operator1<int>(opcode, properties, inputs, outputs, mnemonic,
- controls) {}
-
- virtual OStream& PrintParameter(OStream& os) const { return os; } // NOLINT
- int ControlInputCount() const { return parameter(); }
-};
-
-class CallOperator : public Operator1<CallDescriptor*> {
- public:
- CallOperator(CallDescriptor* descriptor, const char* mnemonic)
- : Operator1<CallDescriptor*>(
- IrOpcode::kCall, descriptor->properties(), descriptor->InputCount(),
- descriptor->ReturnCount(), mnemonic, descriptor) {}
-
- virtual OStream& PrintParameter(OStream& os) const { // NOLINT
- return os << "[" << *parameter() << "]";
- }
-};
-
-// Interface for building common operators that can be used at any level of IR,
-// including JavaScript, mid-level, and low-level.
-// TODO(titzer): Move the mnemonics into SimpleOperator and Operator1 classes.
-class CommonOperatorBuilder {
- public:
- explicit CommonOperatorBuilder(Zone* zone) : zone_(zone) {}
-
-#define CONTROL_OP(name, inputs, controls) \
- return new (zone_) ControlOperator(IrOpcode::k##name, Operator::kFoldable, \
- inputs, 0, controls, #name);
-
- Operator* Start(int num_formal_parameters) {
- // Outputs are formal parameters, plus context, receiver, and JSFunction.
- int outputs = num_formal_parameters + 3;
- return new (zone_) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0,
- outputs, 0, "Start");
- }
- Operator* Dead() { CONTROL_OP(Dead, 0, 0); }
- Operator* End() { CONTROL_OP(End, 0, 1); }
- Operator* Branch() { CONTROL_OP(Branch, 1, 1); }
- Operator* IfTrue() { CONTROL_OP(IfTrue, 0, 1); }
- Operator* IfFalse() { CONTROL_OP(IfFalse, 0, 1); }
- Operator* Throw() { CONTROL_OP(Throw, 1, 1); }
- Operator* LazyDeoptimization() { CONTROL_OP(LazyDeoptimization, 0, 1); }
- Operator* Continuation() { CONTROL_OP(Continuation, 0, 1); }
-
- Operator* Deoptimize() {
- return new (zone_)
- ControlOperator(IrOpcode::kDeoptimize, 0, 1, 0, 1, "Deoptimize");
- }
- Operator* Return() {
- return new (zone_) ControlOperator(IrOpcode::kReturn, 0, 1, 0, 1, "Return");
- }
-
- Operator* Merge(int controls) {
- return new (zone_) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
- 0, controls, "Merge");
- }
+namespace compiler {
- Operator* Loop(int controls) {
- return new (zone_) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
- 0, controls, "Loop");
- }
+// Forward declarations.
+class CallDescriptor;
+struct CommonOperatorBuilderImpl;
+class Operator;
- Operator* Parameter(int index) {
- return new (zone_) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1,
- 1, "Parameter", index);
- }
- Operator* Int32Constant(int32_t value) {
- return new (zone_) Operator1<int>(IrOpcode::kInt32Constant, Operator::kPure,
- 0, 1, "Int32Constant", value);
- }
- Operator* Int64Constant(int64_t value) {
- return new (zone_)
- Operator1<int64_t>(IrOpcode::kInt64Constant, Operator::kPure, 0, 1,
- "Int64Constant", value);
- }
- Operator* Float64Constant(double value) {
- return new (zone_)
- Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
- "Float64Constant", value);
- }
- Operator* ExternalConstant(ExternalReference value) {
- return new (zone_) Operator1<ExternalReference>(IrOpcode::kExternalConstant,
- Operator::kPure, 0, 1,
- "ExternalConstant", value);
- }
- Operator* NumberConstant(double value) {
- return new (zone_)
- Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
- "NumberConstant", value);
- }
- Operator* HeapConstant(PrintableUnique<Object> value) {
- return new (zone_) Operator1<PrintableUnique<Object> >(
- IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
- }
- Operator* Phi(int arguments) {
- DCHECK(arguments > 0); // Disallow empty phis.
- return new (zone_) Operator1<int>(IrOpcode::kPhi, Operator::kPure,
- arguments, 1, "Phi", arguments);
- }
- Operator* EffectPhi(int arguments) {
- DCHECK(arguments > 0); // Disallow empty phis.
- return new (zone_) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
- 0, "EffectPhi", arguments);
- }
- Operator* StateValues(int arguments) {
- return new (zone_) Operator1<int>(IrOpcode::kStateValues, Operator::kPure,
- arguments, 1, "StateValues", arguments);
- }
- Operator* FrameState(BailoutId ast_id) {
- return new (zone_) Operator1<BailoutId>(
- IrOpcode::kFrameState, Operator::kPure, 3, 1, "FrameState", ast_id);
- }
- Operator* Call(CallDescriptor* descriptor) {
- return new (zone_) CallOperator(descriptor, "Call");
- }
- Operator* Projection(int index) {
- return new (zone_) Operator1<int>(IrOpcode::kProjection, Operator::kPure, 1,
- 1, "Projection", index);
- }
- private:
- Zone* zone_;
+// Flag that describes how to combine the current environment with
+// the output of a node to obtain a framestate for lazy bailout.
+enum OutputFrameStateCombine {
+ kPushOutput, // Push the output on the expression stack.
+ kIgnoreOutput // Use the frame state as-is.
};
-template <typename T>
-struct CommonOperatorTraits {
- static inline bool Equals(T a, T b);
- static inline bool HasValue(Operator* op);
- static inline T ValueOf(Operator* op);
+// The type of stack frame that a FrameState node represents.
+enum FrameStateType {
+ JS_FRAME, // Represents an unoptimized JavaScriptFrame.
+ ARGUMENTS_ADAPTOR // Represents an ArgumentsAdaptorFrame.
};
-template <>
-struct CommonOperatorTraits<int32_t> {
- static inline bool Equals(int32_t a, int32_t b) { return a == b; }
- static inline bool HasValue(Operator* op) {
- return op->opcode() == IrOpcode::kInt32Constant ||
- op->opcode() == IrOpcode::kNumberConstant;
- }
- static inline int32_t ValueOf(Operator* op) {
- if (op->opcode() == IrOpcode::kNumberConstant) {
- // TODO(titzer): cache the converted int32 value in NumberConstant.
- return FastD2I(reinterpret_cast<Operator1<double>*>(op)->parameter());
- }
- CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
- return static_cast<Operator1<int32_t>*>(op)->parameter();
- }
-};
-template <>
-struct CommonOperatorTraits<uint32_t> {
- static inline bool Equals(uint32_t a, uint32_t b) { return a == b; }
- static inline bool HasValue(Operator* op) {
- return CommonOperatorTraits<int32_t>::HasValue(op);
- }
- static inline uint32_t ValueOf(Operator* op) {
- if (op->opcode() == IrOpcode::kNumberConstant) {
- // TODO(titzer): cache the converted uint32 value in NumberConstant.
- return FastD2UI(reinterpret_cast<Operator1<double>*>(op)->parameter());
- }
- return static_cast<uint32_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
- }
-};
+class FrameStateCallInfo FINAL {
+ public:
+ FrameStateCallInfo(
+ FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine,
+ MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>())
+ : type_(type),
+ bailout_id_(bailout_id),
+ frame_state_combine_(state_combine),
+ jsfunction_(jsfunction) {}
+
+ FrameStateType type() const { return type_; }
+ BailoutId bailout_id() const { return bailout_id_; }
+ OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+ MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
-template <>
-struct CommonOperatorTraits<int64_t> {
- static inline bool Equals(int64_t a, int64_t b) { return a == b; }
- static inline bool HasValue(Operator* op) {
- return op->opcode() == IrOpcode::kInt32Constant ||
- op->opcode() == IrOpcode::kInt64Constant ||
- op->opcode() == IrOpcode::kNumberConstant;
- }
- static inline int64_t ValueOf(Operator* op) {
- if (op->opcode() == IrOpcode::kInt32Constant) {
- return static_cast<int64_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
- }
- CHECK_EQ(IrOpcode::kInt64Constant, op->opcode());
- return static_cast<Operator1<int64_t>*>(op)->parameter();
- }
+ private:
+ FrameStateType type_;
+ BailoutId bailout_id_;
+ OutputFrameStateCombine frame_state_combine_;
+ MaybeHandle<JSFunction> jsfunction_;
};
-template <>
-struct CommonOperatorTraits<uint64_t> {
- static inline bool Equals(uint64_t a, uint64_t b) { return a == b; }
- static inline bool HasValue(Operator* op) {
- return CommonOperatorTraits<int64_t>::HasValue(op);
- }
- static inline uint64_t ValueOf(Operator* op) {
- return static_cast<uint64_t>(CommonOperatorTraits<int64_t>::ValueOf(op));
- }
-};
-template <>
-struct CommonOperatorTraits<double> {
- static inline bool Equals(double a, double b) {
- return DoubleRepresentation(a).bits == DoubleRepresentation(b).bits;
- }
- static inline bool HasValue(Operator* op) {
- return op->opcode() == IrOpcode::kFloat64Constant ||
- op->opcode() == IrOpcode::kInt32Constant ||
- op->opcode() == IrOpcode::kNumberConstant;
- }
- static inline double ValueOf(Operator* op) {
- if (op->opcode() == IrOpcode::kFloat64Constant ||
- op->opcode() == IrOpcode::kNumberConstant) {
- return reinterpret_cast<Operator1<double>*>(op)->parameter();
- }
- return static_cast<double>(CommonOperatorTraits<int32_t>::ValueOf(op));
- }
-};
-
-template <>
-struct CommonOperatorTraits<ExternalReference> {
- static inline bool Equals(ExternalReference a, ExternalReference b) {
- return a == b;
- }
- static inline bool HasValue(Operator* op) {
- return op->opcode() == IrOpcode::kExternalConstant;
- }
- static inline ExternalReference ValueOf(Operator* op) {
- CHECK_EQ(IrOpcode::kExternalConstant, op->opcode());
- return static_cast<Operator1<ExternalReference>*>(op)->parameter();
- }
-};
+// Interface for building common operators that can be used at any level of IR,
+// including JavaScript, mid-level, and low-level.
+class CommonOperatorBuilder FINAL {
+ public:
+ explicit CommonOperatorBuilder(Zone* zone);
+
+ const Operator* Dead();
+ const Operator* End();
+ const Operator* Branch();
+ const Operator* IfTrue();
+ const Operator* IfFalse();
+ const Operator* Throw();
+ const Operator* Return();
+
+ const Operator* Start(int num_formal_parameters);
+ const Operator* Merge(int controls);
+ const Operator* Loop(int controls);
+ const Operator* Parameter(int index);
+
+ const Operator* Int32Constant(int32_t);
+ const Operator* Int64Constant(int64_t);
+ const Operator* Float32Constant(volatile float);
+ const Operator* Float64Constant(volatile double);
+ const Operator* ExternalConstant(const ExternalReference&);
+ const Operator* NumberConstant(volatile double);
+ const Operator* HeapConstant(const Unique<Object>&);
+
+ const Operator* Phi(MachineType type, int arguments);
+ const Operator* EffectPhi(int arguments);
+ const Operator* ControlEffect();
+ const Operator* ValueEffect(int arguments);
+ const Operator* Finish(int arguments);
+ const Operator* StateValues(int arguments);
+ const Operator* FrameState(
+ FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine,
+ MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>());
+ const Operator* Call(const CallDescriptor* descriptor);
+ const Operator* Projection(size_t index);
-template <typename T>
-struct CommonOperatorTraits<PrintableUnique<T> > {
- static inline bool HasValue(Operator* op) {
- return op->opcode() == IrOpcode::kHeapConstant;
- }
- static inline PrintableUnique<T> ValueOf(Operator* op) {
- CHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
- return static_cast<Operator1<PrintableUnique<T> >*>(op)->parameter();
- }
-};
+ private:
+ Zone* zone() const { return zone_; }
-template <typename T>
-struct CommonOperatorTraits<Handle<T> > {
- static inline bool HasValue(Operator* op) {
- return CommonOperatorTraits<PrintableUnique<T> >::HasValue(op);
- }
- static inline Handle<T> ValueOf(Operator* op) {
- return CommonOperatorTraits<PrintableUnique<T> >::ValueOf(op).handle();
- }
+ const CommonOperatorBuilderImpl& impl_;
+ Zone* const zone_;
};
-
-template <typename T>
-inline T ValueOf(Operator* op) {
- return CommonOperatorTraits<T>::ValueOf(op);
-}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_COMMON_OPERATOR_H_
diff --git a/deps/v8/test/compiler-unittests/compiler-unittests.h b/deps/v8/src/compiler/compiler-test-utils.h
index 091b137066..437abd68f6 100644
--- a/deps/v8/test/compiler-unittests/compiler-unittests.h
+++ b/deps/v8/src/compiler/compiler-test-utils.h
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_UNITTESTS_COMPILER_UNITTESTS_H_
-#define V8_COMPILER_UNITTESTS_COMPILER_UNITTESTS_H_
+#ifndef V8_COMPILER_COMPILER_TEST_UTILS_H_
+#define V8_COMPILER_COMPILER_TEST_UTILS_H_
-#include "include/v8.h"
-#include "src/zone.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -33,6 +31,16 @@ namespace compiler {
#endif
+// The TARGET_TEST_P(Case, Name) macro works just like
+// TEST_P(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
+#else
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
+#endif
+
+
// The TARGET_TYPED_TEST(Case, Name) macro works just like
// TYPED_TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
@@ -42,28 +50,8 @@ namespace compiler {
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
#endif
-
-class CompilerTest : public ::testing::Test {
- public:
- CompilerTest();
- virtual ~CompilerTest();
-
- Isolate* isolate() const { return reinterpret_cast<Isolate*>(isolate_); }
- Zone* zone() { return &zone_; }
-
- static void SetUpTestCase();
- static void TearDownTestCase();
-
- private:
- static v8::Isolate* isolate_;
- v8::Isolate::Scope isolate_scope_;
- v8::HandleScope handle_scope_;
- v8::Context::Scope context_scope_;
- Zone zone_;
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_UNITTESTS_COMPILER_UNITTESTS_H_
+#endif // V8_COMPILER_COMPILER_TEST_UTILS_H_
diff --git a/deps/v8/src/compiler/compiler.gyp b/deps/v8/src/compiler/compiler.gyp
new file mode 100644
index 0000000000..ec5ec285cc
--- /dev/null
+++ b/deps/v8/src/compiler/compiler.gyp
@@ -0,0 +1,60 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'compiler-unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ '../test/test.gyp:run-all-unittests',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'change-lowering-unittest.cc',
+ 'common-operator-unittest.cc',
+ 'compiler-test-utils.h',
+ 'graph-reducer-unittest.cc',
+ 'graph-unittest.cc',
+ 'graph-unittest.h',
+ 'instruction-selector-unittest.cc',
+ 'instruction-selector-unittest.h',
+ 'js-builtin-reducer-unittest.cc',
+ 'machine-operator-reducer-unittest.cc',
+ 'machine-operator-unittest.cc',
+ 'simplified-operator-reducer-unittest.cc',
+ 'simplified-operator-unittest.cc',
+ 'value-numbering-reducer-unittest.cc',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="arm"', {
+ 'sources': [ ### gcmole(arch:arm) ###
+ 'arm/instruction-selector-arm-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [ ### gcmole(arch:arm64) ###
+ 'arm64/instruction-selector-arm64-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [ ### gcmole(arch:ia32) ###
+ 'ia32/instruction-selector-ia32-unittest.cc',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [ ### gcmole(arch:x64) ###
+ 'x64/instruction-selector-x64-unittest.cc',
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/src/compiler/gap-resolver.h b/deps/v8/src/compiler/gap-resolver.h
index 5c3aeada6e..98aaab2b48 100644
--- a/deps/v8/src/compiler/gap-resolver.h
+++ b/deps/v8/src/compiler/gap-resolver.h
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class GapResolver V8_FINAL {
+class GapResolver FINAL {
public:
// Interface used by the gap resolver to emit moves and swaps.
class Assembler {
diff --git a/deps/v8/src/compiler/generic-algorithm.h b/deps/v8/src/compiler/generic-algorithm.h
index 607d339ae4..cd4984f68c 100644
--- a/deps/v8/src/compiler/generic-algorithm.h
+++ b/deps/v8/src/compiler/generic-algorithm.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
#define V8_COMPILER_GENERIC_ALGORITHM_H_
-#include <deque>
#include <stack>
#include "src/compiler/generic-graph.h"
@@ -38,18 +37,15 @@ class GenericGraphVisit {
// void PostEdge(Traits::Node* from, int index, Traits::Node* to);
// }
template <class Visitor, class Traits, class RootIterator>
- static void Visit(GenericGraphBase* graph, RootIterator root_begin,
- RootIterator root_end, Visitor* visitor) {
- // TODO(bmeurer): Pass "local" zone as parameter.
- Zone* zone = graph->zone();
+ static void Visit(GenericGraphBase* graph, Zone* zone,
+ RootIterator root_begin, RootIterator root_end,
+ Visitor* visitor) {
typedef typename Traits::Node Node;
typedef typename Traits::Iterator Iterator;
typedef std::pair<Iterator, Iterator> NodeState;
- typedef zone_allocator<NodeState> ZoneNodeStateAllocator;
- typedef std::deque<NodeState, ZoneNodeStateAllocator> NodeStateDeque;
- typedef std::stack<NodeState, NodeStateDeque> NodeStateStack;
- NodeStateStack stack((NodeStateDeque(ZoneNodeStateAllocator(zone))));
- BoolVector visited(Traits::max_id(graph), false, ZoneBoolAllocator(zone));
+ typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
+ NodeStateStack stack((ZoneDeque<NodeState>(zone)));
+ BoolVector visited(Traits::max_id(graph), false, zone);
Node* current = *root_begin;
while (true) {
DCHECK(current != NULL);
@@ -97,10 +93,10 @@ class GenericGraphVisit {
}
template <class Visitor, class Traits>
- static void Visit(GenericGraphBase* graph, typename Traits::Node* current,
- Visitor* visitor) {
+ static void Visit(GenericGraphBase* graph, Zone* zone,
+ typename Traits::Node* current, Visitor* visitor) {
typename Traits::Node* array[] = {current};
- Visit<Visitor, Traits>(graph, &array[0], &array[1], visitor);
+ Visit<Visitor, Traits>(graph, zone, &array[0], &array[1], visitor);
}
template <class B, class S>
diff --git a/deps/v8/src/compiler/generic-node-inl.h b/deps/v8/src/compiler/generic-node-inl.h
index 51d1a50162..c2dc24ee83 100644
--- a/deps/v8/src/compiler/generic-node-inl.h
+++ b/deps/v8/src/compiler/generic-node-inl.h
@@ -64,12 +64,13 @@ void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
if (replace_to->last_use_ == NULL) {
DCHECK_EQ(NULL, replace_to->first_use_);
replace_to->first_use_ = first_use_;
- } else {
+ replace_to->last_use_ = last_use_;
+ } else if (first_use_ != NULL) {
DCHECK_NE(NULL, replace_to->first_use_);
replace_to->last_use_->next = first_use_;
first_use_->prev = replace_to->last_use_;
+ replace_to->last_use_ = last_use_;
}
- replace_to->last_use_ = last_use_;
replace_to->use_count_ += use_count_;
use_count_ = 0;
first_use_ = NULL;
@@ -141,7 +142,7 @@ template <class B, class S>
void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
if (!has_appendable_inputs_) {
void* deque_buffer = zone->New(sizeof(InputDeque));
- InputDeque* deque = new (deque_buffer) InputDeque(ZoneInputAllocator(zone));
+ InputDeque* deque = new (deque_buffer) InputDeque(zone);
for (int i = 0; i < input_count_; ++i) {
deque->push_back(inputs_.static_[i]);
}
@@ -177,6 +178,16 @@ void GenericNode<B, S>::InsertInput(Zone* zone, int index,
}
template <class B, class S>
+void GenericNode<B, S>::RemoveInput(int index) {
+ DCHECK(index >= 0 && index < InputCount());
+ // TODO(turbofan): Optimize this implementation!
+ for (; index < InputCount() - 1; ++index) {
+ ReplaceInput(index, InputAt(index + 1));
+ }
+ TrimInputCount(InputCount() - 1);
+}
+
+template <class B, class S>
void GenericNode<B, S>::AppendUse(Use* use) {
use->next = NULL;
use->prev = last_use_;
diff --git a/deps/v8/src/compiler/generic-node.h b/deps/v8/src/compiler/generic-node.h
index 287d852f5e..3dc324da7e 100644
--- a/deps/v8/src/compiler/generic-node.h
+++ b/deps/v8/src/compiler/generic-node.h
@@ -5,19 +5,14 @@
#ifndef V8_COMPILER_GENERIC_NODE_H_
#define V8_COMPILER_GENERIC_NODE_H_
-#include <deque>
-
#include "src/v8.h"
-#include "src/compiler/operator.h"
-#include "src/zone.h"
-#include "src/zone-allocator.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-class Operator;
class GenericGraphBase;
typedef int NodeId;
@@ -43,9 +38,10 @@ class GenericNode : public B {
S* InputAt(int index) const {
return static_cast<S*>(GetInputRecordPtr(index)->to);
}
- void ReplaceInput(int index, GenericNode* new_input);
- void AppendInput(Zone* zone, GenericNode* new_input);
- void InsertInput(Zone* zone, int index, GenericNode* new_input);
+ inline void ReplaceInput(int index, GenericNode* new_input);
+ inline void AppendInput(Zone* zone, GenericNode* new_input);
+ inline void InsertInput(Zone* zone, int index, GenericNode* new_input);
+ inline void RemoveInput(int index);
int UseCount() { return use_count_; }
S* UseAt(int index) {
@@ -59,9 +55,9 @@ class GenericNode : public B {
inline void ReplaceUses(GenericNode* replace_to);
template <class UnaryPredicate>
inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
- void RemoveAllInputs();
+ inline void RemoveAllInputs();
- void TrimInputCount(int input_count);
+ inline void TrimInputCount(int input_count);
class Inputs {
public:
@@ -127,8 +123,8 @@ class GenericNode : public B {
}
}
- void AppendUse(Use* use);
- void RemoveUse(Use* use);
+ inline void AppendUse(Use* use);
+ inline void RemoveUse(Use* use);
void* operator new(size_t, void* location) { return location; }
@@ -137,8 +133,7 @@ class GenericNode : public B {
private:
void AssignUniqueID(GenericGraphBase* graph);
- typedef zone_allocator<Input> ZoneInputAllocator;
- typedef std::deque<Input, ZoneInputAllocator> InputDeque;
+ typedef ZoneDeque<Input> InputDeque;
NodeId id_;
int input_count_ : 31;
@@ -204,6 +199,12 @@ class GenericNode<B, S>::Inputs::iterator {
++index_;
return *this;
}
+ iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+ typename GenericNode<B, S>::Input* input = GetInput();
+ input->Update(new_to);
+ index_++;
+ return *this;
+ }
int index() { return index_; }
private:
diff --git a/deps/v8/src/compiler/graph-builder.cc b/deps/v8/src/compiler/graph-builder.cc
index 9c414f1bf9..8992881598 100644
--- a/deps/v8/src/compiler/graph-builder.cc
+++ b/deps/v8/src/compiler/graph-builder.cc
@@ -28,9 +28,13 @@ StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
exit_control_(NULL) {}
-Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
+Node* StructuredGraphBuilder::MakeNode(const Operator* op,
+ int value_input_count,
Node** value_inputs) {
+ DCHECK(op->InputCount() == value_input_count);
+
bool has_context = OperatorProperties::HasContextInput(op);
+ bool has_framestate = OperatorProperties::HasFrameStateInput(op);
bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
@@ -38,20 +42,26 @@ Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
Node* result = NULL;
- if (!has_context && !has_control && !has_effect) {
+ if (!has_context && !has_framestate && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs);
} else {
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
+ if (has_framestate) ++input_count_with_deps;
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
- void* raw_buffer = alloca(kPointerSize * input_count_with_deps);
- Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
memcpy(buffer, value_inputs, kPointerSize * value_input_count);
Node** current_input = buffer + value_input_count;
if (has_context) {
*current_input++ = current_context();
}
+ if (has_framestate) {
+ // The frame state will be inserted later. Here we misuse
+ // the dead_control node as a sentinel to be later overwritten
+ // with the real frame state.
+ *current_input++ = dead_control();
+ }
if (has_effect) {
*current_input++ = environment_->GetEffectDependency();
}
@@ -94,7 +104,7 @@ StructuredGraphBuilder::Environment::Environment(
: builder_(builder),
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
- values_(NodeVector::allocator_type(zone())) {}
+ values_(zone()) {}
StructuredGraphBuilder::Environment::Environment(const Environment& copy)
@@ -152,9 +162,8 @@ void StructuredGraphBuilder::Environment::PrepareForLoop() {
Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
- Operator* phi_op = common()->Phi(count);
- void* raw_buffer = alloca(kPointerSize * (count + 1));
- Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
+ Node** buffer = zone()->NewArray<Node*>(count + 1);
MemsetPointer(buffer, input, count);
buffer[count] = control;
return graph()->NewNode(phi_op, count + 1, buffer);
@@ -164,9 +173,8 @@ Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
// TODO(mstarzinger): Revisit this once we have proper effect states.
Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
Node* control) {
- Operator* phi_op = common()->EffectPhi(count);
- void* raw_buffer = alloca(kPointerSize * (count + 1));
- Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ const Operator* phi_op = common()->EffectPhi(count);
+ Node** buffer = zone()->NewArray<Node*>(count + 1);
MemsetPointer(buffer, input, count);
buffer[count] = control;
return graph()->NewNode(phi_op, count + 1, buffer);
@@ -177,17 +185,17 @@ Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
int inputs = OperatorProperties::GetControlInputCount(control->op()) + 1;
if (control->opcode() == IrOpcode::kLoop) {
// Control node for loop exists, add input.
- Operator* op = common()->Loop(inputs);
+ const Operator* op = common()->Loop(inputs);
control->AppendInput(zone(), other);
control->set_op(op);
} else if (control->opcode() == IrOpcode::kMerge) {
// Control node for merge exists, add input.
- Operator* op = common()->Merge(inputs);
+ const Operator* op = common()->Merge(inputs);
control->AppendInput(zone(), other);
control->set_op(op);
} else {
// Control node is a singleton, introduce a merge.
- Operator* op = common()->Merge(inputs);
+ const Operator* op = common()->Merge(inputs);
control = graph()->NewNode(op, control, other);
}
return control;
@@ -217,7 +225,7 @@ Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
if (value->opcode() == IrOpcode::kPhi &&
NodeProperties::GetControlInput(value) == control) {
// Phi already exists, add input.
- value->set_op(common()->Phi(inputs));
+ value->set_op(common()->Phi(kMachAnyTagged, inputs));
value->InsertInput(zone(), inputs - 1, other);
} else if (value != other) {
// Phi does not exist yet, introduce one.
diff --git a/deps/v8/src/compiler/graph-builder.h b/deps/v8/src/compiler/graph-builder.h
index fc90008554..c966c299b9 100644
--- a/deps/v8/src/compiler/graph-builder.h
+++ b/deps/v8/src/compiler/graph-builder.h
@@ -24,40 +24,41 @@ class GraphBuilder {
explicit GraphBuilder(Graph* graph) : graph_(graph) {}
virtual ~GraphBuilder() {}
- Node* NewNode(Operator* op) {
+ Node* NewNode(const Operator* op) {
return MakeNode(op, 0, static_cast<Node**>(NULL));
}
- Node* NewNode(Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+ Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
- Node* NewNode(Operator* op, Node* n1, Node* n2) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
Node* buffer[] = {n1, n2};
- return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
Node* n5) {
Node* buffer[] = {n1, n2, n3, n4, n5};
- return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
- Node* n6) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, ARRAY_SIZE(nodes), nodes);
+ return MakeNode(op, arraysize(nodes), nodes);
}
- Node* NewNode(Operator* op, int value_input_count, Node** value_inputs) {
+ Node* NewNode(const Operator* op, int value_input_count,
+ Node** value_inputs) {
return MakeNode(op, value_input_count, value_inputs);
}
@@ -65,7 +66,7 @@ class GraphBuilder {
protected:
// Base implementation used by all factory methods.
- virtual Node* MakeNode(Operator* op, int value_input_count,
+ virtual Node* MakeNode(const Operator* op, int value_input_count,
Node** value_inputs) = 0;
private:
@@ -101,13 +102,14 @@ class StructuredGraphBuilder : public GraphBuilder {
protected:
class Environment;
+ friend class Environment;
friend class ControlBuilder;
// The following method creates a new node having the specified operator and
// ensures effect and control dependencies are wired up. The dependencies
// tracked by the environment might be mutated.
- virtual Node* MakeNode(Operator* op, int value_input_count,
- Node** value_inputs);
+ virtual Node* MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs) FINAL;
Environment* environment() const { return environment_; }
void set_environment(Environment* env) { environment_ = env; }
@@ -127,8 +129,8 @@ class StructuredGraphBuilder : public GraphBuilder {
// Helper to wrap a Handle<T> into a Unique<T>.
template <class T>
- PrintableUnique<T> MakeUnique(Handle<T> object) {
- return PrintableUnique<T>::CreateUninitialized(zone(), object);
+ Unique<T> MakeUnique(Handle<T> object) {
+ return Unique<T>::CreateUninitialized(object);
}
// Support for control flow builders. The concrete type of the environment
@@ -202,6 +204,8 @@ class StructuredGraphBuilder::Environment : public ZoneObject {
return builder()->CopyEnvironment(this);
}
+ Node* GetContext() { return builder_->current_context(); }
+
protected:
// TODO(mstarzinger): Use phase-local zone instead!
Zone* zone() const { return graph()->zone(); }
diff --git a/deps/v8/src/compiler/graph-inl.h b/deps/v8/src/compiler/graph-inl.h
index f8423c3f89..efebf7bcb9 100644
--- a/deps/v8/src/compiler/graph-inl.h
+++ b/deps/v8/src/compiler/graph-inl.h
@@ -14,8 +14,9 @@ namespace compiler {
template <class Visitor>
void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
- GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(this, node,
- visitor);
+ Zone tmp_zone(zone()->isolate());
+ GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(
+ this, &tmp_zone, node, visitor);
}
@@ -27,8 +28,9 @@ void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
template <class Visitor>
void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
+ Zone tmp_zone(zone()->isolate());
GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
- this, end(), visitor);
+ this, &tmp_zone, end(), visitor);
}
}
}
diff --git a/deps/v8/src/compiler/graph-reducer-unittest.cc b/deps/v8/src/compiler/graph-reducer-unittest.cc
new file mode 100644
index 0000000000..2729d5803a
--- /dev/null
+++ b/deps/v8/src/compiler/graph-reducer-unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/operator.h"
+#include "src/test/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+using testing::DefaultValue;
+using testing::Return;
+using testing::Sequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+SimpleOperator OP0(0, Operator::kNoWrite, 0, 1, "op0");
+SimpleOperator OP1(1, Operator::kNoProperties, 1, 1, "op1");
+
+
+struct MockReducer : public Reducer {
+ MOCK_METHOD1(Reduce, Reduction(Node*));
+};
+
+} // namespace
+
+
+class GraphReducerTest : public TestWithZone {
+ public:
+ GraphReducerTest() : graph_(zone()) {}
+
+ static void SetUpTestCase() {
+ TestWithZone::SetUpTestCase();
+ DefaultValue<Reduction>::Set(Reducer::NoChange());
+ }
+
+ static void TearDownTestCase() {
+ DefaultValue<Reduction>::Clear();
+ TestWithZone::TearDownTestCase();
+ }
+
+ protected:
+ void ReduceNode(Node* node, Reducer* r) {
+ GraphReducer reducer(graph());
+ reducer.AddReducer(r);
+ reducer.ReduceNode(node);
+ }
+
+ void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
+ GraphReducer reducer(graph());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.ReduceNode(node);
+ }
+
+ void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
+ GraphReducer reducer(graph());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.AddReducer(r3);
+ reducer.ReduceNode(node);
+ }
+
+ Graph* graph() { return &graph_; }
+
+ private:
+ Graph graph_;
+};
+
+
+TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
+ StrictMock<MockReducer> r;
+ Node* node0 = graph()->NewNode(&OP0);
+ Node* node1 = graph()->NewNode(&OP1, node0);
+ Node* node2 = graph()->NewNode(&OP1, node0);
+ EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
+ ReduceNode(node1, &r);
+ EXPECT_FALSE(node0->IsDead());
+ EXPECT_TRUE(node1->IsDead());
+ EXPECT_FALSE(node2->IsDead());
+}
+
+
+TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
+ StrictMock<MockReducer> r1, r2;
+ Node* node0 = graph()->NewNode(&OP0);
+ EXPECT_CALL(r1, Reduce(node0));
+ EXPECT_CALL(r2, Reduce(node0));
+ ReduceNode(node0, &r1, &r2);
+}
+
+
+TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
+ Sequence s1, s2, s3;
+ StrictMock<MockReducer> r1, r2, r3;
+ Node* node0 = graph()->NewNode(&OP0);
+ EXPECT_CALL(r1, Reduce(node0));
+ EXPECT_CALL(r2, Reduce(node0));
+ EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2, s3).WillOnce(
+ Return(Reducer::Changed(node0)));
+ EXPECT_CALL(r1, Reduce(node0)).InSequence(s1);
+ EXPECT_CALL(r2, Reduce(node0)).InSequence(s2);
+ EXPECT_CALL(r3, Reduce(node0)).InSequence(s3);
+ ReduceNode(node0, &r1, &r2, &r3);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index f062d4bea9..07e8923b18 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -13,7 +13,7 @@ namespace internal {
namespace compiler {
GraphReducer::GraphReducer(Graph* graph)
- : graph_(graph), reducers_(Reducers::allocator_type(graph->zone())) {}
+ : graph_(graph), reducers_(graph->zone()) {}
static bool NodeIdIsLessThan(const Node* node, NodeId id) {
@@ -22,25 +22,23 @@ static bool NodeIdIsLessThan(const Node* node, NodeId id) {
void GraphReducer::ReduceNode(Node* node) {
- Reducers::iterator skip = reducers_.end();
static const unsigned kMaxAttempts = 16;
bool reduce = true;
for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
if (!reduce) return;
reduce = false; // Assume we don't need to rerun any reducers.
int before = graph_->NodeCount();
- for (Reducers::iterator i = reducers_.begin(); i != reducers_.end(); ++i) {
- if (i == skip) continue; // Skip this reducer.
+ for (ZoneVector<Reducer*>::iterator i = reducers_.begin();
+ i != reducers_.end(); ++i) {
Reduction reduction = (*i)->Reduce(node);
Node* replacement = reduction.replacement();
if (replacement == NULL) {
// No change from this reducer.
} else if (replacement == node) {
// {replacement == node} represents an in-place reduction.
- // Rerun all the reducers except the current one for this node,
- // as now there may be more opportunities for reduction.
+ // Rerun all the reducers for this node, as now there may be more
+ // opportunities for reduction.
reduce = true;
- skip = i;
break;
} else {
if (node == graph_->start()) graph_->SetStart(replacement);
@@ -48,8 +46,8 @@ void GraphReducer::ReduceNode(Node* node) {
// If {node} was replaced by an old node, unlink {node} and assume that
// {replacement} was already reduced and finish.
if (replacement->id() < before) {
- node->RemoveAllInputs();
node->ReplaceUses(replacement);
+ node->Kill();
return;
}
// Otherwise, {node} was replaced by a new node. Replace all old uses of
@@ -58,9 +56,10 @@ void GraphReducer::ReduceNode(Node* node) {
node->ReplaceUsesIf(
std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
// Unlink {node} if it's no longer used.
- if (node->uses().empty()) node->RemoveAllInputs();
+ if (node->uses().empty()) {
+ node->Kill();
+ }
// Rerun all the reductions on the {replacement}.
- skip = reducers_.end();
node = replacement;
reduce = true;
break;
@@ -71,7 +70,7 @@ void GraphReducer::ReduceNode(Node* node) {
// A helper class to reuse the node traversal algorithm.
-struct GraphReducerVisitor V8_FINAL : public NullNodeVisitor {
+struct GraphReducerVisitor FINAL : public NullNodeVisitor {
explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
GenericGraphVisit::Control Post(Node* node) {
reducer_->ReduceNode(node);
@@ -89,6 +88,7 @@ void GraphReducer::ReduceGraph() {
// TODO(titzer): partial graph reductions.
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 33cded65a7..e0e4f7a3d3 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_GRAPH_REDUCER_H_
#define V8_COMPILER_GRAPH_REDUCER_H_
-#include <list>
-
-#include "src/zone-allocator.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -19,7 +17,7 @@ class Node;
// Represents the result of trying to reduce a node in the graph.
-class Reduction V8_FINAL {
+class Reduction FINAL {
public:
explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
@@ -38,6 +36,7 @@ class Reduction V8_FINAL {
// phase.
class Reducer {
public:
+ Reducer() {}
virtual ~Reducer() {}
// Try to reduce a node if possible.
@@ -47,11 +46,14 @@ class Reducer {
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
static Reduction Changed(Node* node) { return Reduction(node); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Reducer);
};
// Performs an iterative reduction of a node graph.
-class GraphReducer V8_FINAL {
+class GraphReducer FINAL {
public:
explicit GraphReducer(Graph* graph);
@@ -65,13 +67,14 @@ class GraphReducer V8_FINAL {
void ReduceGraph();
private:
- typedef std::list<Reducer*, zone_allocator<Reducer*> > Reducers;
-
Graph* graph_;
- Reducers reducers_;
+ ZoneVector<Reducer*> reducers_;
+
+ DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_GRAPH_REDUCER_H_
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
index efb1180a77..494d4313fe 100644
--- a/deps/v8/src/compiler/graph-replay.cc
+++ b/deps/v8/src/compiler/graph-replay.cc
@@ -40,7 +40,7 @@ void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
}
-void GraphReplayPrinter::PrintReplayOpCreator(Operator* op) {
+void GraphReplayPrinter::PrintReplayOpCreator(const Operator* op) {
IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
const char* builder =
IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
diff --git a/deps/v8/src/compiler/graph-replay.h b/deps/v8/src/compiler/graph-replay.h
index cc186d77c9..53d5247995 100644
--- a/deps/v8/src/compiler/graph-replay.h
+++ b/deps/v8/src/compiler/graph-replay.h
@@ -5,21 +5,19 @@
#ifndef V8_COMPILER_GRAPH_REPLAY_H_
#define V8_COMPILER_GRAPH_REPLAY_H_
-#include "src/v8.h"
-
#include "src/compiler/node.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
class Graph;
-class Operator;
// Helper class to print a full replay of a graph. This replay can be used to
// materialize the same graph within a C++ unit test and hence test subsequent
// optimization passes on a graph without going through the construction steps.
-class GraphReplayPrinter : public NullNodeVisitor {
+class GraphReplayPrinter FINAL : public NullNodeVisitor {
public:
#ifdef DEBUG
static void PrintReplay(Graph* graph);
@@ -33,12 +31,13 @@ class GraphReplayPrinter : public NullNodeVisitor {
private:
GraphReplayPrinter() {}
- static void PrintReplayOpCreator(Operator* op);
+ static void PrintReplayOpCreator(const Operator* op);
DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_GRAPH_REPLAY_H_
diff --git a/deps/v8/test/compiler-unittests/node-matchers.cc b/deps/v8/src/compiler/graph-unittest.cc
index d580834113..35585e8503 100644
--- a/deps/v8/test/compiler-unittests/node-matchers.cc
+++ b/deps/v8/src/compiler/graph-unittest.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/compiler-unittests/node-matchers.h"
+#include "src/compiler/graph-unittest.h"
#include <ostream> // NOLINT(readability/streams)
#include "src/compiler/node-properties-inl.h"
+using testing::_;
using testing::MakeMatcher;
using testing::MatcherInterface;
using testing::MatchResultListener;
@@ -18,13 +19,84 @@ namespace internal {
// TODO(bmeurer): Find a new home for these functions.
template <typename T>
+inline std::ostream& operator<<(std::ostream& os, const Unique<T>& value) {
+ return os << *value.handle();
+}
inline std::ostream& operator<<(std::ostream& os,
- const PrintableUnique<T>& value) {
- return os << value.string();
+ const ExternalReference& value) {
+ OStringStream ost;
+ compiler::StaticParameterTraits<ExternalReference>::PrintTo(ost, value);
+ return os << ost.c_str();
}
namespace compiler {
+GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
+ graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
+}
+
+
+GraphTest::~GraphTest() {}
+
+
+Node* GraphTest::Parameter(int32_t index) {
+ return graph()->NewNode(common()->Parameter(index), graph()->start());
+}
+
+
+Node* GraphTest::Float32Constant(volatile float value) {
+ return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* GraphTest::Float64Constant(volatile double value) {
+ return graph()->NewNode(common()->Float64Constant(value));
+}
+
+
+Node* GraphTest::Int32Constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+}
+
+
+Node* GraphTest::Int64Constant(int64_t value) {
+ return graph()->NewNode(common()->Int64Constant(value));
+}
+
+
+Node* GraphTest::NumberConstant(volatile double value) {
+ return graph()->NewNode(common()->NumberConstant(value));
+}
+
+
+Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
+ return graph()->NewNode(common()->HeapConstant(value));
+}
+
+
+Node* GraphTest::FalseConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Node* GraphTest::TrueConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsFalseConstant() {
+ return IsHeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsTrueConstant() {
+ return IsHeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
namespace {
template <typename T>
@@ -47,18 +119,19 @@ class NodeMatcher : public MatcherInterface<Node*> {
public:
explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
*os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
if (node == NULL) {
*listener << "which is NULL";
return false;
}
if (node->opcode() != opcode_) {
- *listener << "whose opcode is " << IrOpcode::Mnemonic(node->opcode());
+ *listener << "whose opcode is " << IrOpcode::Mnemonic(node->opcode())
+ << " but should have been " << IrOpcode::Mnemonic(opcode_);
return false;
}
return true;
@@ -69,7 +142,7 @@ class NodeMatcher : public MatcherInterface<Node*> {
};
-class IsBranchMatcher V8_FINAL : public NodeMatcher {
+class IsBranchMatcher FINAL : public NodeMatcher {
public:
IsBranchMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher)
@@ -77,7 +150,7 @@ class IsBranchMatcher V8_FINAL : public NodeMatcher {
value_matcher_(value_matcher),
control_matcher_(control_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
@@ -87,7 +160,7 @@ class IsBranchMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
@@ -101,13 +174,102 @@ class IsBranchMatcher V8_FINAL : public NodeMatcher {
};
+class IsMergeMatcher FINAL : public NodeMatcher {
+ public:
+ IsMergeMatcher(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher)
+ : NodeMatcher(IrOpcode::kMerge),
+ control0_matcher_(control0_matcher),
+ control1_matcher_(control1_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose control0 (";
+ control0_matcher_.DescribeTo(os);
+ *os << ") and control1 (";
+ control1_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+ OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+ "control0", control0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
+ "control1", control1_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> control0_matcher_;
+ const Matcher<Node*> control1_matcher_;
+};
+
+
+class IsControl1Matcher FINAL : public NodeMatcher {
+ public:
+ IsControl1Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+ OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsFinishMatcher FINAL : public NodeMatcher {
+ public:
+ IsFinishMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher)
+ : NodeMatcher(IrOpcode::kFinish),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+ OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+};
+
+
template <typename T>
-class IsConstantMatcher V8_FINAL : public NodeMatcher {
+class IsConstantMatcher FINAL : public NodeMatcher {
public:
IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
: NodeMatcher(opcode), value_matcher_(value_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
@@ -115,7 +277,7 @@ class IsConstantMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
listener));
@@ -126,19 +288,23 @@ class IsConstantMatcher V8_FINAL : public NodeMatcher {
};
-class IsPhiMatcher V8_FINAL : public NodeMatcher {
+class IsPhiMatcher FINAL : public NodeMatcher {
public:
- IsPhiMatcher(const Matcher<Node*>& value0_matcher,
+ IsPhiMatcher(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& control_matcher)
: NodeMatcher(IrOpcode::kPhi),
+ type_matcher_(type_matcher),
value0_matcher_(value0_matcher),
value1_matcher_(value1_matcher),
control_matcher_(control_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
- *os << " whose value0 (";
+ *os << " whose type (";
+ type_matcher_.DescribeTo(os);
+ *os << "), value0 (";
value0_matcher_.DescribeTo(os);
*os << "), value1 (";
value1_matcher_.DescribeTo(os);
@@ -148,8 +314,10 @@ class IsPhiMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
+ type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value0", value0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
@@ -159,21 +327,22 @@ class IsPhiMatcher V8_FINAL : public NodeMatcher {
}
private:
+ const Matcher<MachineType> type_matcher_;
const Matcher<Node*> value0_matcher_;
const Matcher<Node*> value1_matcher_;
const Matcher<Node*> control_matcher_;
};
-class IsProjectionMatcher V8_FINAL : public NodeMatcher {
+class IsProjectionMatcher FINAL : public NodeMatcher {
public:
- IsProjectionMatcher(const Matcher<int32_t>& index_matcher,
+ IsProjectionMatcher(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher)
: NodeMatcher(IrOpcode::kProjection),
index_matcher_(index_matcher),
base_matcher_(base_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
*os << " whose index (";
index_matcher_.DescribeTo(os);
@@ -183,36 +352,101 @@ class IsProjectionMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<int32_t>(node), "index",
+ PrintMatchAndExplain(OpParameter<size_t>(node), "index",
index_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
base_matcher_, listener));
}
private:
- const Matcher<int32_t> index_matcher_;
+ const Matcher<size_t> index_matcher_;
const Matcher<Node*> base_matcher_;
};
-class IsLoadMatcher V8_FINAL : public NodeMatcher {
+class IsCallMatcher FINAL : public NodeMatcher {
public:
- IsLoadMatcher(const Matcher<MachineType>& type_matcher,
+ IsCallMatcher(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kCall),
+ descriptor_matcher_(descriptor_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ value2_matcher_(value2_matcher),
+ value3_matcher_(value3_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << ") and value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << ") and value2 (";
+ value2_matcher_.DescribeTo(os);
+ *os << ") and value3 (";
+ value3_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+ OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
+ "descriptor", descriptor_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value2", value2_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+ "value3", value3_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<CallDescriptor*> descriptor_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> value2_matcher_;
+ const Matcher<Node*> value3_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadMatcher FINAL : public NodeMatcher {
+ public:
+ IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher)
: NodeMatcher(IrOpcode::kLoad),
- type_matcher_(type_matcher),
+ rep_matcher_(rep_matcher),
base_matcher_(base_matcher),
index_matcher_(index_matcher),
effect_matcher_(effect_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
- *os << " whose type (";
- type_matcher_.DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
*os << "), base (";
base_matcher_.DescribeTo(os);
*os << "), index (";
@@ -223,10 +457,10 @@ class IsLoadMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
- type_matcher_, listener) &&
+ PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
+ rep_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
base_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
@@ -236,14 +470,14 @@ class IsLoadMatcher V8_FINAL : public NodeMatcher {
}
private:
- const Matcher<MachineType> type_matcher_;
+ const Matcher<LoadRepresentation> rep_matcher_;
const Matcher<Node*> base_matcher_;
const Matcher<Node*> index_matcher_;
const Matcher<Node*> effect_matcher_;
};
-class IsStoreMatcher V8_FINAL : public NodeMatcher {
+class IsStoreMatcher FINAL : public NodeMatcher {
public:
IsStoreMatcher(const Matcher<MachineType>& type_matcher,
const Matcher<WriteBarrierKind> write_barrier_matcher,
@@ -261,7 +495,7 @@ class IsStoreMatcher V8_FINAL : public NodeMatcher {
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
*os << " whose type (";
type_matcher_.DescribeTo(os);
@@ -281,12 +515,13 @@ class IsStoreMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<StoreRepresentation>(node).rep,
- "type", type_matcher_, listener) &&
PrintMatchAndExplain(
- OpParameter<StoreRepresentation>(node).write_barrier_kind,
+ OpParameter<StoreRepresentation>(node).machine_type(), "type",
+ type_matcher_, listener) &&
+ PrintMatchAndExplain(
+ OpParameter<StoreRepresentation>(node).write_barrier_kind(),
"write barrier", write_barrier_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
base_matcher_, listener) &&
@@ -311,7 +546,7 @@ class IsStoreMatcher V8_FINAL : public NodeMatcher {
};
-class IsBinopMatcher V8_FINAL : public NodeMatcher {
+class IsBinopMatcher FINAL : public NodeMatcher {
public:
IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher)
@@ -319,7 +554,7 @@ class IsBinopMatcher V8_FINAL : public NodeMatcher {
lhs_matcher_(lhs_matcher),
rhs_matcher_(rhs_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
*os << " whose lhs (";
lhs_matcher_.DescribeTo(os);
@@ -329,7 +564,7 @@ class IsBinopMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
lhs_matcher_, listener) &&
@@ -343,12 +578,12 @@ class IsBinopMatcher V8_FINAL : public NodeMatcher {
};
-class IsUnopMatcher V8_FINAL : public NodeMatcher {
+class IsUnopMatcher FINAL : public NodeMatcher {
public:
IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
: NodeMatcher(opcode), input_matcher_(input_matcher) {}
- virtual void DescribeTo(std::ostream* os) const V8_OVERRIDE {
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
*os << " whose input (";
input_matcher_.DescribeTo(os);
@@ -356,7 +591,7 @@ class IsUnopMatcher V8_FINAL : public NodeMatcher {
}
virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
- V8_OVERRIDE {
+ OVERRIDE {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"input", input_matcher_, listener));
@@ -365,7 +600,6 @@ class IsUnopMatcher V8_FINAL : public NodeMatcher {
private:
const Matcher<Node*> input_matcher_;
};
-
}
@@ -375,39 +609,118 @@ Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
}
-Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher) {
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher) {
+ return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
+}
+
+
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
+}
+
+
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
return MakeMatcher(
- new IsConstantMatcher<int32_t>(IrOpcode::kInt32Constant, value_matcher));
+ new IsControl1Matcher(IrOpcode::kIfFalse, control_matcher));
+}
+
+
+Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsControl1Matcher(IrOpcode::kControlEffect, control_matcher));
+}
+
+
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
+ return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
+}
+
+
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher) {
+ return MakeMatcher(new IsFinishMatcher(value_matcher, effect_matcher));
+}
+
+
+Matcher<Node*> IsExternalConstant(
+ const Matcher<ExternalReference>& value_matcher) {
+ return MakeMatcher(new IsConstantMatcher<ExternalReference>(
+ IrOpcode::kExternalConstant, value_matcher));
}
Matcher<Node*> IsHeapConstant(
- const Matcher<PrintableUnique<HeapObject> >& value_matcher) {
- return MakeMatcher(new IsConstantMatcher<PrintableUnique<HeapObject> >(
+ const Matcher<Unique<HeapObject> >& value_matcher) {
+ return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
IrOpcode::kHeapConstant, value_matcher));
}
-Matcher<Node*> IsPhi(const Matcher<Node*>& value0_matcher,
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<int32_t>(IrOpcode::kInt32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
+ return MakeMatcher(
+ new IsConstantMatcher<double>(IrOpcode::kNumberConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& merge_matcher) {
- return MakeMatcher(
- new IsPhiMatcher(value0_matcher, value1_matcher, merge_matcher));
+ return MakeMatcher(new IsPhiMatcher(type_matcher, value0_matcher,
+ value1_matcher, merge_matcher));
}
-Matcher<Node*> IsProjection(const Matcher<int32_t>& index_matcher,
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher) {
return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
}
-Matcher<Node*> IsLoad(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsCallMatcher(
+ descriptor_matcher, value0_matcher, value1_matcher, value2_matcher,
+ value3_matcher, effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher) {
- return MakeMatcher(new IsLoadMatcher(type_matcher, base_matcher,
- index_matcher, effect_matcher));
+ return MakeMatcher(new IsLoadMatcher(rep_matcher, base_matcher, index_matcher,
+ effect_matcher));
}
@@ -430,14 +743,20 @@ Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
return MakeMatcher( \
new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
}
+IS_BINOP_MATCHER(NumberLessThan)
+IS_BINOP_MATCHER(NumberSubtract)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Sar)
+IS_BINOP_MATCHER(Word32Shl)
+IS_BINOP_MATCHER(Word32Ror)
IS_BINOP_MATCHER(Word32Equal)
IS_BINOP_MATCHER(Word64And)
IS_BINOP_MATCHER(Word64Sar)
IS_BINOP_MATCHER(Word64Shl)
IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32Mul)
+IS_BINOP_MATCHER(Uint32LessThanOrEqual)
#undef IS_BINOP_MATCHER
@@ -445,8 +764,16 @@ IS_BINOP_MATCHER(Int32AddWithOverflow)
Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) { \
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
-IS_UNOP_MATCHER(ConvertInt64ToInt32)
+IS_UNOP_MATCHER(ChangeFloat64ToInt32)
+IS_UNOP_MATCHER(ChangeFloat64ToUint32)
IS_UNOP_MATCHER(ChangeInt32ToFloat64)
+IS_UNOP_MATCHER(ChangeInt32ToInt64)
+IS_UNOP_MATCHER(ChangeUint32ToFloat64)
+IS_UNOP_MATCHER(ChangeUint32ToUint64)
+IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
+IS_UNOP_MATCHER(TruncateFloat64ToInt32)
+IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float64Sqrt)
#undef IS_UNOP_MATCHER
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph-unittest.h b/deps/v8/src/compiler/graph-unittest.h
new file mode 100644
index 0000000000..b821165ee5
--- /dev/null
+++ b/deps/v8/src/compiler/graph-unittest.h
@@ -0,0 +1,143 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_UNITTEST_H_
+#define V8_COMPILER_GRAPH_UNITTEST_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/test/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HeapObject;
+template <class T>
+class Unique;
+
+namespace compiler {
+
+using ::testing::Matcher;
+
+
+class GraphTest : public TestWithContext, public TestWithZone {
+ public:
+ explicit GraphTest(int parameters = 1);
+ virtual ~GraphTest();
+
+ protected:
+ Node* Parameter(int32_t index);
+ Node* Float32Constant(volatile float value);
+ Node* Float64Constant(volatile double value);
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* NumberConstant(volatile double value);
+ Node* HeapConstant(const Unique<HeapObject>& value);
+ Node* FalseConstant();
+ Node* TrueConstant();
+
+ Matcher<Node*> IsFalseConstant();
+ Matcher<Node*> IsTrueConstant();
+
+ CommonOperatorBuilder* common() { return &common_; }
+ Graph* graph() { return &graph_; }
+
+ private:
+ CommonOperatorBuilder common_;
+ Graph graph_;
+};
+
+
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsExternalConstant(
+ const Matcher<ExternalReference>& value_matcher);
+Matcher<Node*> IsHeapConstant(
+ const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+ const Matcher<Node*>& base_matcher);
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+
+Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
+ const Matcher<WriteBarrierKind>& write_barrier_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_GRAPH_UNITTEST_H_
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 144512ad0d..304787552f 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -22,9 +22,144 @@ namespace compiler {
#define DEAD_COLOR "#999999"
+class Escaped {
+ public:
+ explicit Escaped(const OStringStream& os, const char* escaped_chars = "<>|{}")
+ : str_(os.c_str()), escaped_chars_(escaped_chars) {}
+
+ friend OStream& operator<<(OStream& os, const Escaped& e) {
+ for (const char* s = e.str_; *s != '\0'; ++s) {
+ if (e.needs_escape(*s)) os << "\\";
+ os << *s;
+ }
+ return os;
+ }
+
+ private:
+ bool needs_escape(char ch) const {
+ for (size_t i = 0; i < strlen(escaped_chars_); ++i) {
+ if (ch == escaped_chars_[i]) return true;
+ }
+ return false;
+ }
+
+ const char* const str_;
+ const char* const escaped_chars_;
+};
+
+class JSONGraphNodeWriter : public NullNodeVisitor {
+ public:
+ JSONGraphNodeWriter(OStream& os, Zone* zone, const Graph* graph) // NOLINT
+ : os_(os),
+ graph_(graph),
+ first_node_(true) {}
+
+ void Print() { const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this); }
+
+ GenericGraphVisit::Control Pre(Node* node);
+
+ private:
+ OStream& os_;
+ const Graph* const graph_;
+ bool first_node_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
+};
+
+
+GenericGraphVisit::Control JSONGraphNodeWriter::Pre(Node* node) {
+ if (first_node_) {
+ first_node_ = false;
+ } else {
+ os_ << ",";
+ }
+ OStringStream label;
+ label << *node->op();
+ os_ << "{\"id\":" << node->id() << ",\"label\":\"" << Escaped(label, "\"")
+ << "\"";
+ IrOpcode::Value opcode = node->opcode();
+ if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
+ << "]";
+ os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
+ << "]";
+ } else if (opcode == IrOpcode::kIfTrue || opcode == IrOpcode::kIfFalse ||
+ opcode == IrOpcode::kLoop) {
+ os_ << ",\"rankInputs\":[" << NodeProperties::FirstControlIndex(node)
+ << "]";
+ }
+ if (opcode == IrOpcode::kBranch) {
+ os_ << ",\"rankInputs\":[0]";
+ }
+ os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
+ os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
+ : "false");
+ os_ << "}";
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+class JSONGraphEdgeWriter : public NullNodeVisitor {
+ public:
+ JSONGraphEdgeWriter(OStream& os, Zone* zone, const Graph* graph) // NOLINT
+ : os_(os),
+ graph_(graph),
+ first_edge_(true) {}
+
+ void Print() { const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this); }
+
+ GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+
+ private:
+ OStream& os_;
+ const Graph* const graph_;
+ bool first_edge_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSONGraphEdgeWriter);
+};
+
+
+GenericGraphVisit::Control JSONGraphEdgeWriter::PreEdge(Node* from, int index,
+ Node* to) {
+ if (first_edge_) {
+ first_edge_ = false;
+ } else {
+ os_ << ",";
+ }
+ const char* edge_type = NULL;
+ if (index < NodeProperties::FirstValueIndex(from)) {
+ edge_type = "unknown";
+ } else if (index < NodeProperties::FirstContextIndex(from)) {
+ edge_type = "value";
+ } else if (index < NodeProperties::FirstFrameStateIndex(from)) {
+ edge_type = "context";
+ } else if (index < NodeProperties::FirstEffectIndex(from)) {
+ edge_type = "frame-state";
+ } else if (index < NodeProperties::FirstControlIndex(from)) {
+ edge_type = "effect";
+ } else {
+ edge_type = "control";
+ }
+ os_ << "{\"source\":" << to->id() << ",\"target\":" << from->id()
+ << ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+OStream& operator<<(OStream& os, const AsJSON& ad) {
+ Zone tmp_zone(ad.graph.zone()->isolate());
+ os << "{\"nodes\":[";
+ JSONGraphNodeWriter(os, &tmp_zone, &ad.graph).Print();
+ os << "],\"edges\":[";
+ JSONGraphEdgeWriter(os, &tmp_zone, &ad.graph).Print();
+ os << "]}";
+ return os;
+}
+
+
class GraphVisualizer : public NullNodeVisitor {
public:
- GraphVisualizer(OStream& os, const Graph* graph); // NOLINT
+ GraphVisualizer(OStream& os, Zone* zone, const Graph* graph); // NOLINT
void Print();
@@ -33,8 +168,9 @@ class GraphVisualizer : public NullNodeVisitor {
private:
void AnnotateNode(Node* node);
- void PrintEdge(Node* from, int index, Node* to);
+ void PrintEdge(Node::Edge edge);
+ Zone* zone_;
NodeSet all_nodes_;
NodeSet white_nodes_;
bool use_to_def_;
@@ -86,36 +222,6 @@ GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
}
-class Escaped {
- public:
- explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
-
- friend OStream& operator<<(OStream& os, const Escaped& e) {
- for (const char* s = e.str_; *s != '\0'; ++s) {
- if (needs_escape(*s)) os << "\\";
- os << *s;
- }
- return os;
- }
-
- private:
- static bool needs_escape(char ch) {
- switch (ch) {
- case '>':
- case '<':
- case '|':
- case '}':
- case '{':
- return true;
- default:
- return false;
- }
- }
-
- const char* const str_;
-};
-
-
static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
if (from->opcode() == IrOpcode::kPhi ||
from->opcode() == IrOpcode::kEffectPhi) {
@@ -165,6 +271,10 @@ void GraphVisualizer::AnnotateNode(Node* node) {
++i, j--) {
os_ << "|<I" << i.index() << ">X #" << (*i)->id();
}
+ for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
+ ++i, j--) {
+ os_ << "|<I" << i.index() << ">F #" << (*i)->id();
+ }
for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
++i, j--) {
os_ << "|<I" << i.index() << ">E #" << (*i)->id();
@@ -191,7 +301,10 @@ void GraphVisualizer::AnnotateNode(Node* node) {
}
-void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) {
+void GraphVisualizer::PrintEdge(Node::Edge edge) {
+ Node* from = edge.from();
+ int index = edge.index();
+ Node* to = edge.to();
bool unconstrained = IsLikelyBackEdge(from, index, to);
os_ << " ID" << from->id();
if (all_nodes_.count(to) == 0) {
@@ -200,11 +313,15 @@ void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) {
GetControlCluster(from) == NULL ||
(OperatorProperties::GetControlInputCount(from->op()) > 0 &&
NodeProperties::GetControlInput(from) != to)) {
- os_ << ":I" << index << ":n -> ID" << to->id() << ":s";
- if (unconstrained) os_ << " [constraint=false,style=dotted]";
+ os_ << ":I" << index << ":n -> ID" << to->id() << ":s"
+ << "[" << (unconstrained ? "constraint=false, " : "")
+ << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
+ << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
+ << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
} else {
- os_ << " -> ID" << to->id() << ":s [color=transparent"
- << (unconstrained ? ", constraint=false" : "") << "]";
+ os_ << " -> ID" << to->id() << ":s [color=transparent, "
+ << (unconstrained ? "constraint=false, " : "")
+ << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
}
os_ << "\n";
}
@@ -214,6 +331,10 @@ void GraphVisualizer::Print() {
os_ << "digraph D {\n"
<< " node [fontsize=8,height=0.25]\n"
<< " rankdir=\"BT\"\n"
+ << " ranksep=\"1.2 equally\"\n"
+ << " overlap=\"false\"\n"
+ << " splines=\"true\"\n"
+ << " concentrate=\"true\"\n"
<< " \n";
// Make sure all nodes have been output before writing out the edges.
@@ -225,8 +346,8 @@ void GraphVisualizer::Print() {
// Visit all uses of white nodes.
use_to_def_ = false;
GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
- const_cast<Graph*>(graph_), white_nodes_.begin(), white_nodes_.end(),
- this);
+ const_cast<Graph*>(graph_), zone_, white_nodes_.begin(),
+ white_nodes_.end(), this);
os_ << " DEAD_INPUT [\n"
<< " style=\"filled\" \n"
@@ -239,25 +360,26 @@ void GraphVisualizer::Print() {
Node::Inputs inputs = (*i)->inputs();
for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
++iter) {
- PrintEdge(iter.edge().from(), iter.edge().index(), iter.edge().to());
+ PrintEdge(iter.edge());
}
}
os_ << "}\n";
}
-GraphVisualizer::GraphVisualizer(OStream& os, const Graph* graph) // NOLINT
- : all_nodes_(NodeSet::key_compare(),
- NodeSet::allocator_type(graph->zone())),
- white_nodes_(NodeSet::key_compare(),
- NodeSet::allocator_type(graph->zone())),
+GraphVisualizer::GraphVisualizer(OStream& os, Zone* zone,
+ const Graph* graph) // NOLINT
+ : zone_(zone),
+ all_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
+ white_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
use_to_def_(true),
os_(os),
graph_(graph) {}
OStream& operator<<(OStream& os, const AsDOT& ad) {
- GraphVisualizer(os, &ad.graph).Print();
+ Zone tmp_zone(ad.graph.zone()->isolate());
+ GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
return os;
}
}
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 12532bacf8..db92dc2a30 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -22,6 +22,13 @@ struct AsDOT {
};
OStream& operator<<(OStream& os, const AsDOT& ad);
+
+struct AsJSON {
+ explicit AsJSON(const Graph& g) : graph(g) {}
+ const Graph& graph;
+};
+
+OStream& operator<<(OStream& os, const AsJSON& ad);
}
}
} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index 3f47eace81..7b5f228aa1 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -18,37 +18,20 @@ namespace v8 {
namespace internal {
namespace compiler {
-Graph::Graph(Zone* zone)
- : GenericGraph<Node>(zone),
- decorators_(DecoratorVector::allocator_type(zone)) {}
+Graph::Graph(Zone* zone) : GenericGraph<Node>(zone), decorators_(zone) {}
-Node* Graph::NewNode(Operator* op, int input_count, Node** inputs) {
- DCHECK(op->InputCount() <= input_count);
+Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs) {
+ DCHECK_LE(op->InputCount(), input_count);
Node* result = Node::New(this, input_count, inputs);
result->Initialize(op);
- for (DecoratorVector::iterator i = decorators_.begin();
+ for (ZoneVector<GraphDecorator*>::iterator i = decorators_.begin();
i != decorators_.end(); ++i) {
(*i)->Decorate(result);
}
return result;
}
-
-void Graph::ChangeOperator(Node* node, Operator* op) { node->set_op(op); }
-
-
-void Graph::DeleteNode(Node* node) {
-#if DEBUG
- // Nodes can't be deleted if they have uses.
- Node::Uses::iterator use_iterator(node->uses().begin());
- DCHECK(use_iterator == node->uses().end());
-#endif
-
-#if DEBUG
- memset(node, 0xDE, sizeof(Node));
-#endif
-}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 65ea3b30a4..07eb02f9c9 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -25,39 +25,36 @@ class Graph : public GenericGraph<Node> {
explicit Graph(Zone* zone);
// Base implementation used by all factory methods.
- Node* NewNode(Operator* op, int input_count, Node** inputs);
+ Node* NewNode(const Operator* op, int input_count, Node** inputs);
// Factories for nodes with static input counts.
- Node* NewNode(Operator* op) {
+ Node* NewNode(const Operator* op) {
return NewNode(op, 0, static_cast<Node**>(NULL));
}
- Node* NewNode(Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
- Node* NewNode(Operator* op, Node* n1, Node* n2) {
+ Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
Node* nodes[] = {n1, n2};
- return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ return NewNode(op, arraysize(nodes), nodes);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
Node* nodes[] = {n1, n2, n3};
- return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ return NewNode(op, arraysize(nodes), nodes);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
Node* nodes[] = {n1, n2, n3, n4};
- return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ return NewNode(op, arraysize(nodes), nodes);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
Node* n5) {
Node* nodes[] = {n1, n2, n3, n4, n5};
- return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ return NewNode(op, arraysize(nodes), nodes);
}
- Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
- Node* n6) {
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ return NewNode(op, arraysize(nodes), nodes);
}
- void ChangeOperator(Node* node, Operator* op);
- void DeleteNode(Node* node);
-
template <class Visitor>
void VisitNodeUsesFrom(Node* node, Visitor* visitor);
@@ -72,16 +69,14 @@ class Graph : public GenericGraph<Node> {
}
void RemoveDecorator(GraphDecorator* decorator) {
- DecoratorVector::iterator it =
+ ZoneVector<GraphDecorator*>::iterator it =
std::find(decorators_.begin(), decorators_.end(), decorator);
DCHECK(it != decorators_.end());
decorators_.erase(it, it + 1);
}
private:
- typedef std::vector<GraphDecorator*, zone_allocator<GraphDecorator*> >
- DecoratorVector;
- DecoratorVector decorators_;
+ ZoneVector<GraphDecorator*> decorators_;
};
@@ -90,8 +85,9 @@ class GraphDecorator : public ZoneObject {
virtual ~GraphDecorator() {}
virtual void Decorate(Node* node) = 0;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_GRAPH_H_
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 31a01798a9..d9f8833ff7 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -59,6 +59,9 @@ class IA32OperandConverter : public InstructionOperandConverter {
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
+ case Constant::kFloat32:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Immediate(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
@@ -73,25 +76,83 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate(-1);
}
- Operand MemoryOperand(int* first_input) {
- const int offset = *first_input;
- switch (AddressingModeField::decode(instr_->opcode())) {
+ static int NextOffset(int* offset) {
+ int i = *offset;
+ (*offset)++;
+ return i;
+ }
+
+ static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+ STATIC_ASSERT(0 == static_cast<int>(times_1));
+ STATIC_ASSERT(1 == static_cast<int>(times_2));
+ STATIC_ASSERT(2 == static_cast<int>(times_4));
+ STATIC_ASSERT(3 == static_cast<int>(times_8));
+ int scale = static_cast<int>(mode - one);
+ DCHECK(scale >= 0 && scale < 4);
+ return static_cast<ScaleFactor>(scale);
+ }
+
+ Operand MemoryOperand(int* offset) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ switch (mode) {
+ case kMode_MR: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = 0;
+ return Operand(base, disp);
+ }
+ case kMode_MRI: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, disp);
+ }
+ case kMode_MR1:
+ case kMode_MR2:
+ case kMode_MR4:
+ case kMode_MR8: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+ int32_t disp = 0;
+ return Operand(base, index, scale, disp);
+ }
case kMode_MR1I:
- *first_input += 2;
- return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
- times_1,
- 0); // TODO(dcarney): K != 0
- case kMode_MRI:
- *first_input += 2;
- return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
- InputImmediate(offset + 1));
- case kMode_MI:
- *first_input += 1;
- return Operand(InputImmediate(offset + 0));
- default:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_M1:
+ case kMode_M2:
+ case kMode_M4:
+ case kMode_M8: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1, mode);
+ int32_t disp = 0;
+ return Operand(index, scale, disp);
+ }
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(index, scale, disp);
+ }
+ case kMode_MI: {
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(Immediate(disp));
+ }
+ case kMode_None:
UNREACHABLE();
- return Operand(no_reg);
+ return Operand(no_reg, 0);
}
+ UNREACHABLE();
+ return Operand(no_reg, 0);
}
Operand MemoryOperand() {
@@ -111,6 +172,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
IA32OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ }
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ AddSafepointAndDeopt(instr);
+ break;
+ }
case kArchJmp:
__ jmp(code()->GetLabel(i.InputBlock(0)));
break;
@@ -120,15 +205,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchRet:
AssembleReturn();
break;
- case kArchDeoptimize: {
- int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
-
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
- __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- }
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -220,58 +299,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sar_cl(i.OutputRegister());
}
break;
- case kIA32Push:
- if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
- } else {
- __ push(i.InputOperand(0));
- }
- break;
- case kIA32CallCodeObject: {
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ call(Operand(reg, entry));
- }
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
-
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
- AddNopForSmiCodeInlining();
- break;
- }
- case kIA32CallAddress:
- if (HasImmediateInput(instr, 0)) {
- // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
- __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
- RelocInfo::RUNTIME_ENTRY);
+ case kIA32Ror:
+ if (HasImmediateInput(instr, 1)) {
+ __ ror(i.OutputRegister(), i.InputInt5(1));
} else {
- __ call(i.InputRegister(0));
+ __ ror_cl(i.OutputRegister());
}
break;
- case kPopStack: {
- int words = MiscField::decode(instr->opcode());
- __ add(esp, Immediate(kPointerSize * words));
- break;
- }
- case kIA32CallJSFunction: {
- Register func = i.InputRegister(0);
-
- // TODO(jarin) The load of the context should be separated from the call.
- __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
-
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
- break;
- }
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -312,14 +346,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(esp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Sqrt:
+ __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
+ case kSSECvtss2sd:
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kSSECvtsd2ss:
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToUint32: {
XMMRegister scratch = xmm0;
__ Move(scratch, -2147483648.0);
- // TODO(turbofan): IA32 SSE subsd() should take an operand.
- __ addsd(scratch, i.InputDoubleRegister(0));
+ __ addsd(scratch, i.InputOperand(0));
__ cvttsd2si(i.OutputRegister(), scratch);
__ add(i.OutputRegister(), Immediate(0x80000000));
break;
@@ -331,60 +373,76 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
__ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
break;
- case kSSELoad:
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
- break;
- case kSSEStore: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
+ case kIA32Movsxbl:
+ __ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
- }
- case kIA32LoadWord8:
+ case kIA32Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32StoreWord8: {
+ case kIA32Movb: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ mov_b(operand, i.InputRegister(index));
+ if (HasImmediateInput(instr, index)) {
+ __ mov_b(operand, i.InputInt8(index));
+ } else {
+ __ mov_b(operand, i.InputRegister(index));
+ }
break;
}
- case kIA32StoreWord8I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov_b(operand, i.InputInt8(index));
+ case kIA32Movsxwl:
+ __ movsx_w(i.OutputRegister(), i.MemoryOperand());
break;
- }
- case kIA32LoadWord16:
+ case kIA32Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32StoreWord16: {
+ case kIA32Movw: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ mov_w(operand, i.InputRegister(index));
+ if (HasImmediateInput(instr, index)) {
+ __ mov_w(operand, i.InputInt16(index));
+ } else {
+ __ mov_w(operand, i.InputRegister(index));
+ }
break;
}
- case kIA32StoreWord16I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov_w(operand, i.InputInt16(index));
+ case kIA32Movl:
+ if (instr->HasOutput()) {
+ __ mov(i.OutputRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov(operand, i.InputImmediate(index));
+ } else {
+ __ mov(operand, i.InputRegister(index));
+ }
+ }
break;
- }
- case kIA32LoadWord32:
- __ mov(i.OutputRegister(), i.MemoryOperand());
+ case kIA32Movsd:
+ if (instr->HasOutput()) {
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ }
break;
- case kIA32StoreWord32: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov(operand, i.InputRegister(index));
+ case kIA32Movss:
+ if (instr->HasOutput()) {
+ __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movss(operand, i.InputDoubleRegister(index));
+ }
break;
- }
- case kIA32StoreWord32I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov(operand, i.InputImmediate(index));
+ case kIA32Push:
+ if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ } else {
+ __ push(i.InputOperand(0));
+ }
break;
- }
case kIA32StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -574,6 +632,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:
@@ -782,8 +847,9 @@ void CodeGenerator::AssembleReturn() {
} else {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- int pop_count =
- descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
__ ret(pop_count * kPointerSize);
}
}
@@ -834,9 +900,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (destination->IsStackSlot()) {
Operand dst = g.ToOperand(destination);
__ mov(dst, g.ToImmediate(source));
+ } else if (src_constant.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ Immediate src(bit_cast<int32_t>(src_constant.ToFloat32()));
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ push(Immediate(src));
+ __ movss(dst, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize / 2));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, src);
+ }
} else {
- double v = g.ToDouble(source);
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ double v = src_constant.ToFloat64();
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
if (destination->IsDoubleRegister()) {
@@ -936,21 +1016,23 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-#undef __
-#ifdef DEBUG
-
-// Checks whether the code between start_pc and end_pc is a no-op.
-bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc) {
- if (start_pc + 1 != end_pc) {
- return false;
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
- return *(code->instruction_start() + start_pc) ==
- v8::internal::Assembler::kNopByte;
+ MarkLazyDeoptSite();
}
-#endif // DEBUG
-}
-}
-} // namespace v8::internal::compiler
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index f175ebb559..268a59da0c 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -27,32 +27,30 @@ namespace compiler {
V(IA32Shl) \
V(IA32Shr) \
V(IA32Sar) \
- V(IA32Push) \
- V(IA32CallCodeObject) \
- V(IA32CallAddress) \
- V(PopStack) \
- V(IA32CallJSFunction) \
+ V(IA32Ror) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSECvtss2sd) \
+ V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
- V(SSELoad) \
- V(SSEStore) \
- V(IA32LoadWord8) \
- V(IA32StoreWord8) \
- V(IA32StoreWord8I) \
- V(IA32LoadWord16) \
- V(IA32StoreWord16) \
- V(IA32StoreWord16I) \
- V(IA32LoadWord32) \
- V(IA32StoreWord32) \
- V(IA32StoreWord32I) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Push) \
V(IA32StoreWriteBarrier)
@@ -63,23 +61,31 @@ namespace compiler {
//
// We use the following local notation for addressing modes:
//
-// R = register
-// O = register or stack slot
-// D = double register
-// I = immediate (handle, external, int32)
-// MR = [register]
-// MI = [immediate]
-// MRN = [register + register * N in {1, 2, 4, 8}]
-// MRI = [register + immediate]
-// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+// M = memory operand
+// R = base register
+// N = index register * N for N in {1, 2, 4, 8}
+// I = immediate displacement (int32_t)
+
#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MI) /* [K] */ \
- V(MR) /* [%r0] */ \
- V(MRI) /* [%r0 + K] */ \
- V(MR1I) /* [%r0 + %r1 * 1 + K] */ \
- V(MR2I) /* [%r0 + %r1 * 2 + K] */ \
- V(MR4I) /* [%r0 + %r1 * 4 + K] */ \
- V(MR8I) /* [%r0 + %r1 * 8 + K] */
+ V(MR) /* [%r1 ] */ \
+ V(MRI) /* [%r1 + K] */ \
+ V(MR1) /* [%r1 + %r2*1 ] */ \
+ V(MR2) /* [%r1 + %r2*2 ] */ \
+ V(MR4) /* [%r1 + %r2*4 ] */ \
+ V(MR8) /* [%r1 + %r2*8 ] */ \
+ V(MR1I) /* [%r1 + %r2*1 + K] */ \
+ V(MR2I) /* [%r1 + %r2*2 + K] */ \
+  V(MR4I)  /* [%r1 + %r2*4 + K] */ \
+  V(MR8I)  /* [%r1 + %r2*8 + K] */ \
+ V(M1) /* [ %r2*1 ] */ \
+ V(M2) /* [ %r2*2 ] */ \
+ V(M4) /* [ %r2*4 ] */ \
+ V(M8) /* [ %r2*8 ] */ \
+ V(M1I) /* [ %r2*1 + K] */ \
+ V(M2I) /* [ %r2*2 + K] */ \
+ V(M4I) /* [ %r2*4 + K] */ \
+ V(M8I) /* [ %r2*8 + K] */ \
+ V(MI) /* [ K] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32-unittest.cc
new file mode 100644
index 0000000000..89d150f26e
--- /dev/null
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -0,0 +1,429 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+ kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+
+} // namespace
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
+ TRACED_FOREACH(int32_t, imm, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Better left operand for commutative binops
+
+TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* param1 = m.Parameter(0);
+ Node* param2 = m.Parameter(1);
+ Node* add = m.Int32Add(param1, param2);
+ m.Return(m.Int32Add(add, param1));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* param1 = m.Parameter(0);
+ Node* param2 = m.Parameter(1);
+ Node* mul = m.Int32Mul(param1, param2);
+ m.Return(m.Int32Mul(mul, param1));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+ OStringStream ost;
+ ost << memacc.type;
+ return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8, kIA32Movsxbl, kIA32Movb},
+ {kMachUint8, kIA32Movzxbl, kIA32Movb},
+ {kMachInt16, kIA32Movsxwl, kIA32Movw},
+ {kMachUint16, kIA32Movzxwl, kIA32Movw},
+ {kMachInt32, kIA32Movl, kIA32Movl},
+ {kMachUint32, kIA32Movl, kIA32Movl},
+ {kMachFloat32, kIA32Movss, kIA32Movss},
+ {kMachFloat64, kIA32Movsd, kIA32Movsd}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, base, kImmediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ if (base == 0) {
+ ASSERT_EQ(1U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, kImmediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ if (index == 0) {
+ ASSERT_EQ(1U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, base, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ if (base == 0) {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, kImmediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ if (index == 0) {
+ ASSERT_EQ(2U, s[0]->InputCount());
+ } else {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ }
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// AddressingMode for loads and stores.
+
+class AddressingModeUnitTest : public InstructionSelectorTest {
+ public:
+ AddressingModeUnitTest() : m(NULL) { Reset(); }
+ ~AddressingModeUnitTest() { delete m; }
+
+ void Run(Node* base, Node* index, AddressingMode mode) {
+ Node* load = m->Load(kMachInt32, base, index);
+ m->Store(kMachInt32, base, index, load);
+ m->Return(m->Int32Constant(0));
+ Stream s = m->Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(mode, s[0]->addressing_mode());
+ EXPECT_EQ(mode, s[1]->addressing_mode());
+ }
+
+ Node* zero;
+ Node* null_ptr;
+ Node* non_zero;
+ Node* base_reg; // opaque value to generate base as register
+ Node* index_reg; // opaque value to generate index as register
+ Node* scales[4];
+ StreamBuilder* m;
+
+ void Reset() {
+ delete m;
+ m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
+ zero = m->Int32Constant(0);
+ null_ptr = m->Int32Constant(0);
+ non_zero = m->Int32Constant(127);
+ base_reg = m->Parameter(0);
+ index_reg = m->Parameter(0);
+
+ scales[0] = m->Int32Constant(1);
+ scales[1] = m->Int32Constant(2);
+ scales[2] = m->Int32Constant(4);
+ scales[3] = m->Int32Constant(8);
+ }
+};
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
+ Node* base = base_reg;
+ Node* index = zero;
+ Run(base, index, kMode_MR);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
+ Node* base = base_reg;
+ Node* index = non_zero;
+ Run(base, index, kMode_MRI);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
+ Node* base = base_reg;
+ Node* index = index_reg;
+ Run(base, index, kMode_MR1);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
+ AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = base_reg;
+ Node* index = m->Int32Mul(index_reg, scales[i]);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
+ Node* base = base_reg;
+ Node* index = m->Int32Add(index_reg, non_zero);
+ Run(base, index, kMode_MR1I);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
+ AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = base_reg;
+ Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
+ Node* base = null_ptr;
+ Node* index = index_reg;
+ Run(base, index, kMode_MR);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
+ AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = null_ptr;
+ Node* index = m->Int32Mul(index_reg, scales[i]);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
+ Node* base = null_ptr;
+ Node* index = m->Int32Add(index_reg, non_zero);
+ Run(base, index, kMode_MRI);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
+ AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = null_ptr;
+ Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
+ Node* bases[] = {null_ptr, non_zero};
+ Node* indices[] = {zero, non_zero};
+ for (size_t i = 0; i < arraysize(bases); ++i) {
+ for (size_t j = 0; j < arraysize(indices); ++j) {
+ Reset();
+ Node* base = bases[i];
+ Node* index = indices[j];
+ Run(base, index, kMode_MI);
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index a057a1e713..70bee35a2f 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -11,7 +11,7 @@ namespace internal {
namespace compiler {
// Adds IA32-specific methods for generating operands.
-class IA32OperandGenerator V8_FINAL : public OperandGenerator {
+class IA32OperandGenerator FINAL : public OperandGenerator {
public:
explicit IA32OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
@@ -30,60 +30,157 @@ class IA32OperandGenerator V8_FINAL : public OperandGenerator {
case IrOpcode::kHeapConstant: {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
- return !isolate()->heap()->InNewSpace(*value);
+ Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+ return !isolate()->heap()->InNewSpace(*value.handle());
}
default:
return false;
}
}
+
+ bool CanBeBetterLeftOperand(Node* node) const {
+ return !selector()->IsLive(node);
+ }
+};
+
+
+class AddressingModeMatcher {
+ public:
+ AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
+ : base_operand_(NULL),
+ index_operand_(NULL),
+ displacement_operand_(NULL),
+ mode_(kMode_None) {
+ Int32Matcher index_imm(index);
+ if (index_imm.HasValue()) {
+ int32_t displacement = index_imm.Value();
+ // Compute base operand and fold base immediate into displacement.
+ Int32Matcher base_imm(base);
+ if (!base_imm.HasValue()) {
+ base_operand_ = g->UseRegister(base);
+ } else {
+ displacement += base_imm.Value();
+ }
+ if (displacement != 0 || base_operand_ == NULL) {
+ displacement_operand_ = g->TempImmediate(displacement);
+ }
+ if (base_operand_ == NULL) {
+ mode_ = kMode_MI;
+ } else {
+ if (displacement == 0) {
+ mode_ = kMode_MR;
+ } else {
+ mode_ = kMode_MRI;
+ }
+ }
+ } else {
+ // Compute index and displacement.
+ IndexAndDisplacementMatcher matcher(index);
+ index_operand_ = g->UseRegister(matcher.index_node());
+ int32_t displacement = matcher.displacement();
+ // Compute base operand and fold base immediate into displacement.
+ Int32Matcher base_imm(base);
+ if (!base_imm.HasValue()) {
+ base_operand_ = g->UseRegister(base);
+ } else {
+ displacement += base_imm.Value();
+ }
+ // Compute displacement operand.
+ if (displacement != 0) {
+ displacement_operand_ = g->TempImmediate(displacement);
+ }
+ // Compute mode with scale factor one.
+ if (base_operand_ == NULL) {
+ if (displacement_operand_ == NULL) {
+ mode_ = kMode_M1;
+ } else {
+ mode_ = kMode_M1I;
+ }
+ } else {
+ if (displacement_operand_ == NULL) {
+ mode_ = kMode_MR1;
+ } else {
+ mode_ = kMode_MR1I;
+ }
+ }
+ // Adjust mode to actual scale factor.
+ mode_ = GetMode(mode_, matcher.power());
+ // Don't emit instructions with scale factor 1 if there's no base.
+ if (mode_ == kMode_M1) {
+ mode_ = kMode_MR;
+ } else if (mode_ == kMode_M1I) {
+ mode_ = kMode_MRI;
+ }
+ }
+ DCHECK_NE(kMode_None, mode_);
+ }
+
+ AddressingMode GetMode(AddressingMode one, int power) {
+ return static_cast<AddressingMode>(static_cast<int>(one) + power);
+ }
+
+ size_t SetInputs(InstructionOperand** inputs) {
+ size_t input_count = 0;
+ // Compute inputs_ and input_count.
+ if (base_operand_ != NULL) {
+ inputs[input_count++] = base_operand_;
+ }
+ if (index_operand_ != NULL) {
+ inputs[input_count++] = index_operand_;
+ }
+ if (displacement_operand_ != NULL) {
+ inputs[input_count++] = displacement_operand_;
+ }
+ DCHECK_NE(input_count, 0);
+ return input_count;
+ }
+
+ static const int kMaxInputCount = 3;
+ InstructionOperand* base_operand_;
+ InstructionOperand* index_operand_;
+ InstructionOperand* displacement_operand_;
+ AddressingMode mode_;
};
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = OpParameter<MachineType>(node);
- IA32OperandGenerator g(this);
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* output = rep == kMachineFloat64
- ? g.DefineAsDoubleRegister(node)
- : g.DefineAsRegister(node);
ArchOpcode opcode;
+ // TODO(titzer): signed/unsigned small loads
switch (rep) {
- case kMachineFloat64:
- opcode = kSSELoad;
+ case kRepFloat32:
+ opcode = kIA32Movss;
break;
- case kMachineWord8:
- opcode = kIA32LoadWord8;
+ case kRepFloat64:
+ opcode = kIA32Movsd;
break;
- case kMachineWord16:
- opcode = kIA32LoadWord16;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord32:
- opcode = kIA32LoadWord32;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kIA32Movl;
break;
default:
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(base)) {
- if (Int32Matcher(index).Is(0)) { // load [#base + #0]
- Emit(opcode | AddressingModeField::encode(kMode_MI), output,
- g.UseImmediate(base));
- } else { // load [#base + %index]
- Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
- g.UseRegister(index), g.UseImmediate(base));
- }
- } else if (g.CanBeImmediate(index)) { // load [%base + #index]
- Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
- g.UseRegister(base), g.UseImmediate(index));
- } else { // load [%base + %index + K]
- Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
- g.UseRegister(base), g.UseRegister(index));
- }
- // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+ IA32OperandGenerator g(this);
+ AddressingModeMatcher matcher(&g, base, index);
+ InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
+ size_t input_count = matcher.SetInputs(inputs);
+ Emit(code, 1, outputs, input_count, inputs);
}
@@ -94,68 +191,59 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = store_rep.rep;
- if (store_rep.write_barrier_kind == kFullWriteBarrier) {
- DCHECK_EQ(kMachineTagged, rep);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
- g.UseFixed(index, ecx), g.UseFixed(value, edx), ARRAY_SIZE(temps),
+ g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
temps);
return;
}
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
- bool is_immediate = false;
- InstructionOperand* val;
- if (rep == kMachineFloat64) {
- val = g.UseDoubleRegister(value);
- } else {
- is_immediate = g.CanBeImmediate(value);
- if (is_immediate) {
- val = g.UseImmediate(value);
- } else if (rep == kMachineWord8) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
- }
- }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
ArchOpcode opcode;
switch (rep) {
- case kMachineFloat64:
- opcode = kSSEStore;
+ case kRepFloat32:
+ opcode = kIA32Movss;
break;
- case kMachineWord8:
- opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8;
+ case kRepFloat64:
+ opcode = kIA32Movsd;
break;
- case kMachineWord16:
- opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kIA32Movb;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord32:
- opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32;
+ case kRepWord16:
+ opcode = kIA32Movw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kIA32Movl;
break;
default:
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(base)) {
- if (Int32Matcher(index).Is(0)) { // store [#base], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
- g.UseImmediate(base), val);
- } else { // store [#base + %index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(index), g.UseImmediate(base), val);
- }
- } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), val);
- } else { // store [%base + %index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
- g.UseRegister(base), g.UseRegister(index), val);
+
+ InstructionOperand* val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == kRepWord8 || rep == kRepBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
}
- // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+ AddressingModeMatcher matcher(&g, base, index);
+ InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
+ InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
+ size_t input_count = matcher.SetInputs(inputs);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
@@ -164,20 +252,24 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
- // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
- // this might be the last use and therefore its register can be reused.
- if (g.CanBeImmediate(m.right().node())) {
- inputs[input_count++] = g.Use(m.left().node());
- inputs[input_count++] = g.UseImmediate(m.right().node());
+ if (g.CanBeImmediate(right)) {
+ inputs[input_count++] = g.Use(left);
+ inputs[input_count++] = g.UseImmediate(right);
} else {
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.Use(m.right().node());
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
}
if (cont->IsBranch()) {
@@ -193,8 +285,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_NE(0, input_count);
DCHECK_NE(0, output_count);
- DCHECK_GE(ARRAY_SIZE(inputs), input_count);
- DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
@@ -271,6 +363,11 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitShift(this, node, kIA32Ror);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kIA32Add);
}
@@ -289,16 +386,16 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
IA32OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
if (g.CanBeImmediate(right)) {
Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
g.UseImmediate(right));
- } else if (g.CanBeImmediate(left)) {
- Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
- g.UseImmediate(left));
} else {
- // TODO(turbofan): select better left operand.
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
g.Use(right));
}
@@ -309,7 +406,7 @@ static inline void VisitDiv(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand* temps[] = {g.TempRegister(edx)};
- size_t temp_count = ARRAY_SIZE(temps);
+ size_t temp_count = arraysize(temps);
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), temp_count, temps);
@@ -330,7 +427,7 @@ static inline void VisitMod(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
- size_t temp_count = ARRAY_SIZE(temps);
+ size_t temp_count = arraysize(temps);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), temp_count, temps);
@@ -347,17 +444,23 @@ void InstructionSelector::VisitInt32UMod(Node* node) {
}
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ IA32OperandGenerator g(this);
+ // TODO(turbofan): IA32 SSE conversions should take an operand.
+ Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
- g.Use(node->InputAt(0)));
+ Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
// TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
- Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node),
+ Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
@@ -370,41 +473,42 @@ void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
IA32OperandGenerator g(this);
- // TODO(turbofan): IA32 SSE subsd() should take an operand.
- Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node),
- g.UseDoubleRegister(node->InputAt(0)));
+ Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ IA32OperandGenerator g(this);
+ // TODO(turbofan): IA32 SSE conversions should take an operand.
+ Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Add(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitFloat64Div(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
@@ -412,8 +516,14 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand* temps[] = {g.TempRegister(eax)};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
+ temps);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -495,8 +605,7 @@ void InstructionSelector::VisitFloat64Compare(Node* node,
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
- cont);
+ VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}
@@ -504,55 +613,53 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {
IA32OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor);
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
+ InitializeCallBuffer(call, &buffer, true, true);
// Push any stack arguments.
- for (int i = buffer.pushed_count - 1; i >= 0; --i) {
- Node* input = buffer.pushed_nodes[i];
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
// TODO(titzer): handle pushing double parameters.
Emit(kIA32Push, NULL,
- g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input));
+ g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kIA32CallAddress;
- break;
case CallDescriptor::kCallJSFunction:
- opcode = kIA32CallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_count > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
- }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/ia32/linkage-ia32.cc b/deps/v8/src/compiler/ia32/linkage-ia32.cc
index 57a2c6918a..f2c5fab5dc 100644
--- a/deps/v8/src/compiler/ia32/linkage-ia32.cc
+++ b/deps/v8/src/compiler/ia32/linkage-ia32.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-struct LinkageHelperTraits {
+struct IA32LinkageHelperTraits {
static Register ReturnValueReg() { return eax; }
static Register ReturnValue2Reg() { return edx; }
static Register JSCallFunctionReg() { return edi; }
@@ -28,36 +28,34 @@ struct LinkageHelperTraits {
static int CRegisterParametersLength() { return 0; }
};
+typedef LinkageHelper<IA32LinkageHelperTraits> LH;
CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
- return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
- zone, parameter_count);
+ return LH::GetJSCallDescriptor(zone, parameter_count);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
- zone, function, parameter_count, properties, can_deoptimize);
+ Operator::Properties properties, Zone* zone) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
- zone, descriptor, stack_parameter_count, can_deoptimize);
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone) {
+ return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+ flags);
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types) {
- return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
- zone, num_params, return_type, param_types);
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 35c8e31f27..4b7fe679e9 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -29,10 +29,12 @@ namespace compiler {
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define ARCH_OPCODE_LIST(V) \
- V(ArchDeoptimize) \
+ V(ArchCallCodeObject) \
+ V(ArchCallJSFunction) \
V(ArchJmp) \
V(ArchNop) \
V(ArchRet) \
+ V(ArchTruncateDoubleToI) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
@@ -105,10 +107,10 @@ typedef int32_t InstructionCode;
// continuation into a single InstructionCode which is stored as part of
// the instruction.
typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
-typedef BitField<AddressingMode, 7, 4> AddressingModeField;
-typedef BitField<FlagsMode, 11, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
-typedef BitField<int, 13, 19> MiscField;
+typedef BitField<AddressingMode, 7, 5> AddressingModeField;
+typedef BitField<FlagsMode, 12, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 14, 5> FlagsConditionField;
+typedef BitField<int, 14, 18> MiscField;
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index ac446b38ed..b860bc5187 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -25,11 +25,6 @@ class OperandGenerator {
UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
}
- InstructionOperand* DefineAsDoubleRegister(Node* node) {
- return Define(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
- }
-
InstructionOperand* DefineSameAsFirst(Node* result) {
return Define(result, new (zone())
UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
@@ -41,7 +36,7 @@ class OperandGenerator {
Register::ToAllocationIndex(reg)));
}
- InstructionOperand* DefineAsFixedDouble(Node* node, DoubleRegister reg) {
+ InstructionOperand* DefineAsFixed(Node* node, DoubleRegister reg) {
return Define(node, new (zone())
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
DoubleRegister::ToAllocationIndex(reg)));
@@ -53,8 +48,9 @@ class OperandGenerator {
return ConstantOperand::Create(node->id(), zone());
}
- InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location) {
- return Define(node, ToUnallocatedOperand(location));
+ InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location,
+ MachineType type) {
+ return Define(node, ToUnallocatedOperand(location, type));
}
InstructionOperand* Use(Node* node) {
@@ -69,12 +65,6 @@ class OperandGenerator {
UnallocatedOperand::USED_AT_START));
}
- InstructionOperand* UseDoubleRegister(Node* node) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START));
- }
-
// Use register or operand for the node. If a register is chosen, it won't
// alias any temporary or output registers.
InstructionOperand* UseUnique(Node* node) {
@@ -88,20 +78,13 @@ class OperandGenerator {
UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
}
- // Use a unique double register for the node that does not alias any temporary
- // or output double registers.
- InstructionOperand* UseUniqueDoubleRegister(Node* node) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
- }
-
InstructionOperand* UseFixed(Node* node, Register reg) {
return Use(node, new (zone())
UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
Register::ToAllocationIndex(reg)));
}
- InstructionOperand* UseFixedDouble(Node* node, DoubleRegister reg) {
+ InstructionOperand* UseFixed(Node* node, DoubleRegister reg) {
return Use(node, new (zone())
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
DoubleRegister::ToAllocationIndex(reg)));
@@ -112,8 +95,9 @@ class OperandGenerator {
return ImmediateOperand::Create(index, zone());
}
- InstructionOperand* UseLocation(Node* node, LinkageLocation location) {
- return Use(node, ToUnallocatedOperand(location));
+ InstructionOperand* UseLocation(Node* node, LinkageLocation location,
+ MachineType type) {
+ return Use(node, ToUnallocatedOperand(location, type));
}
InstructionOperand* TempRegister() {
@@ -159,16 +143,18 @@ class OperandGenerator {
static Constant ToConstant(const Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- return Constant(ValueOf<int32_t>(node->op()));
+ return Constant(OpParameter<int32_t>(node));
case IrOpcode::kInt64Constant:
- return Constant(ValueOf<int64_t>(node->op()));
- case IrOpcode::kNumberConstant:
+ return Constant(OpParameter<int64_t>(node));
+ case IrOpcode::kFloat32Constant:
+ return Constant(OpParameter<float>(node));
case IrOpcode::kFloat64Constant:
- return Constant(ValueOf<double>(node->op()));
+ case IrOpcode::kNumberConstant:
+ return Constant(OpParameter<double>(node));
case IrOpcode::kExternalConstant:
- return Constant(ValueOf<ExternalReference>(node->op()));
+ return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
- return Constant(ValueOf<Handle<HeapObject> >(node->op()));
+ return Constant(OpParameter<Unique<HeapObject> >(node).handle());
default:
break;
}
@@ -192,7 +178,8 @@ class OperandGenerator {
return operand;
}
- UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
+ UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location,
+ MachineType type) {
if (location.location_ == LinkageLocation::ANY_REGISTER) {
return new (zone())
UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
@@ -201,7 +188,7 @@ class OperandGenerator {
return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
location.location_);
}
- if (location.rep_ == kMachineFloat64) {
+ if (RepresentationOf(type) == kRepFloat64) {
return new (zone()) UnallocatedOperand(
UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
}
@@ -218,7 +205,7 @@ class OperandGenerator {
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
-class FlagsContinuation V8_FINAL {
+class FlagsContinuation FINAL {
public:
FlagsContinuation() : mode_(kFlags_none) {}
@@ -346,22 +333,26 @@ class FlagsContinuation V8_FINAL {
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
- CallBuffer(Zone* zone, CallDescriptor* descriptor);
+ CallBuffer(Zone* zone, CallDescriptor* descriptor,
+ FrameStateDescriptor* frame_state);
- int output_count;
CallDescriptor* descriptor;
- Node** output_nodes;
- InstructionOperand** outputs;
- InstructionOperand** fixed_and_control_args;
- int fixed_count;
- Node** pushed_nodes;
- int pushed_count;
+ FrameStateDescriptor* frame_state_descriptor;
+ NodeVector output_nodes;
+ InstructionOperandVector outputs;
+ InstructionOperandVector instruction_args;
+ NodeVector pushed_nodes;
- int input_count() { return descriptor->InputCount(); }
+ size_t input_count() const { return descriptor->InputCount(); }
- int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; }
+ size_t frame_state_count() const { return descriptor->FrameStateCount(); }
- int fixed_and_control_count() { return fixed_count + control_count(); }
+ size_t frame_state_value_count() const {
+ return (frame_state_descriptor == NULL)
+ ? 0
+ : (frame_state_descriptor->GetTotalSize() +
+ 1); // Include deopt id.
+ }
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction-selector-unittest.cc b/deps/v8/src/compiler/instruction-selector-unittest.cc
new file mode 100644
index 0000000000..75159b09c1
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-selector-unittest.cc
@@ -0,0 +1,512 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+#include "src/compiler/compiler-test-utils.h"
+#include "src/flags.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+} // namespace
+
+
+InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
+
+
+InstructionSelectorTest::~InstructionSelectorTest() {}
+
+
+InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
+ InstructionSelector::Features features,
+ InstructionSelectorTest::StreamBuilderMode mode) {
+ Schedule* schedule = Export();
+ if (FLAG_trace_turbo) {
+ OFStream out(stdout);
+ out << "=== Schedule before instruction selection ===" << endl << *schedule;
+ }
+ EXPECT_NE(0, graph()->NodeCount());
+ CompilationInfo info(test_->isolate(), test_->zone());
+ Linkage linkage(&info, call_descriptor());
+ InstructionSequence sequence(&linkage, graph(), schedule);
+ SourcePositionTable source_position_table(graph());
+ InstructionSelector selector(&sequence, &source_position_table, features);
+ selector.SelectInstructions();
+ if (FLAG_trace_turbo) {
+ OFStream out(stdout);
+ out << "=== Code sequence after instruction selection ===" << endl
+ << sequence;
+ }
+ Stream s;
+ std::set<int> virtual_registers;
+ for (InstructionSequence::const_iterator i = sequence.begin();
+ i != sequence.end(); ++i) {
+ Instruction* instr = *i;
+ if (instr->opcode() < 0) continue;
+ if (mode == kTargetInstructions) {
+ switch (instr->arch_opcode()) {
+#define CASE(Name) \
+ case k##Name: \
+ break;
+ TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ default:
+ continue;
+ }
+ }
+ if (mode == kAllExceptNopInstructions && instr->arch_opcode() == kArchNop) {
+ continue;
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i) {
+ InstructionOperand* output = instr->OutputAt(i);
+ EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
+ if (output->IsConstant()) {
+ s.constants_.insert(std::make_pair(
+ output->index(), sequence.GetConstant(output->index())));
+ virtual_registers.insert(output->index());
+ } else if (output->IsUnallocated()) {
+ virtual_registers.insert(
+ UnallocatedOperand::cast(output)->virtual_register());
+ }
+ }
+ for (size_t i = 0; i < instr->InputCount(); ++i) {
+ InstructionOperand* input = instr->InputAt(i);
+ EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
+ if (input->IsImmediate()) {
+ s.immediates_.insert(std::make_pair(
+ input->index(), sequence.GetImmediate(input->index())));
+ } else if (input->IsUnallocated()) {
+ virtual_registers.insert(
+ UnallocatedOperand::cast(input)->virtual_register());
+ }
+ }
+ s.instructions_.push_back(instr);
+ }
+ for (std::set<int>::const_iterator i = virtual_registers.begin();
+ i != virtual_registers.end(); ++i) {
+ int virtual_register = *i;
+ if (sequence.IsDouble(virtual_register)) {
+ EXPECT_FALSE(sequence.IsReference(virtual_register));
+ s.doubles_.insert(virtual_register);
+ }
+ if (sequence.IsReference(virtual_register)) {
+ EXPECT_FALSE(sequence.IsDouble(virtual_register));
+ s.references_.insert(virtual_register);
+ }
+ }
+ for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
+ s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
+ InstructionSequence::StateId::FromInt(i)));
+ }
+ return s;
+}
+
+
+// -----------------------------------------------------------------------------
+// Return.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
+ const float kValue = 4.2f;
+ StreamBuilder m(this, kMachFloat32);
+ m.Return(m.Float32Constant(kValue));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+ EXPECT_FLOAT_EQ(kValue, s.ToFloat32(s[0]->OutputAt(0)));
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Parameter(0));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
+ StreamBuilder m(this, kMachInt32);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachFloat64);
+ m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArchRet, s[2]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Parameters.
+
+
+TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* param = m.Parameter(0);
+ m.Return(param);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_TRUE(s.IsDouble(param->id()));
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ Node* param = m.Parameter(0);
+ m.Return(param);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_TRUE(s.IsReference(param->id()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Finish.
+
+
+TARGET_TEST_F(InstructionSelectorTest, Finish) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ Node* param = m.Parameter(0);
+ Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
+ m.Return(finish);
+ Stream s = m.Build(kAllInstructions);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_TRUE(s[0]->Output()->IsUnallocated());
+ EXPECT_EQ(param->id(), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kArchNop, s[1]->arch_opcode());
+ ASSERT_EQ(1U, s[1]->InputCount());
+ ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(param->id(), s.ToVreg(s[1]->InputAt(0)));
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ ASSERT_TRUE(s[1]->Output()->IsUnallocated());
+ EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(finish->id(), s.ToVreg(s[1]->Output()));
+ EXPECT_TRUE(s.IsReference(finish->id()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Phi.
+
+
+typedef InstructionSelectorTestWithParam<MachineType>
+ InstructionSelectorPhiTest;
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
+ const MachineType type = GetParam();
+ StreamBuilder m(this, type, type, type);
+ Node* param0 = m.Parameter(0);
+ Node* param1 = m.Parameter(1);
+ MLabel a, b, c;
+ m.Branch(m.Int32Constant(0), &a, &b);
+ m.Bind(&a);
+ m.Goto(&c);
+ m.Bind(&b);
+ m.Goto(&c);
+ m.Bind(&c);
+ Node* phi = m.Phi(type, param0, param1);
+ m.Return(phi);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param0->id()));
+ EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param1->id()));
+}
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
+ const MachineType type = GetParam();
+ StreamBuilder m(this, type, type, type);
+ Node* param0 = m.Parameter(0);
+ Node* param1 = m.Parameter(1);
+ MLabel a, b, c;
+ m.Branch(m.Int32Constant(1), &a, &b);
+ m.Bind(&a);
+ m.Goto(&c);
+ m.Bind(&b);
+ m.Goto(&c);
+ m.Bind(&c);
+ Node* phi = m.Phi(type, param0, param1);
+ m.Return(phi);
+ Stream s = m.Build(kAllInstructions);
+ EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param0->id()));
+ EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param1->id()));
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
+ ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
+ kMachInt16, kMachUint16, kMachInt32,
+ kMachUint32, kMachInt64, kMachUint64,
+ kMachPtr, kMachAnyTagged));
+
+
+// -----------------------------------------------------------------------------
+// ValueEffect.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
+ StreamBuilder m1(this, kMachInt32, kMachPtr);
+ Node* p1 = m1.Parameter(0);
+ m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
+ Stream s1 = m1.Build(kAllInstructions);
+ StreamBuilder m2(this, kMachInt32, kMachPtr);
+ Node* p2 = m2.Parameter(0);
+ m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
+ m2.NewNode(m2.common()->ValueEffect(1), p2)));
+ Stream s2 = m2.Build(kAllInstructions);
+ EXPECT_LE(3U, s1.size());
+ ASSERT_EQ(s1.size(), s2.size());
+ TRACED_FORRANGE(size_t, i, 0, s1.size() - 1) {
+ const Instruction* i1 = s1[i];
+ const Instruction* i2 = s2[i];
+ EXPECT_EQ(i1->arch_opcode(), i2->arch_opcode());
+ EXPECT_EQ(i1->InputCount(), i2->InputCount());
+ EXPECT_EQ(i1->OutputCount(), i2->OutputCount());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Calls with deoptimization.
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged);
+
+ BailoutId bailout_id(42);
+
+ Node* function_node = m.Parameter(0);
+ Node* receiver = m.Parameter(1);
+ Node* context = m.Parameter(2);
+
+ Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
+ Node* locals = m.NewNode(m.common()->StateValues(0));
+ Node* stack = m.NewNode(m.common()->StateValues(0));
+ Node* context_dummy = m.Int32Constant(0);
+
+ Node* state_node = m.NewNode(
+ m.common()->FrameState(JS_FRAME, bailout_id, kPushOutput), parameters,
+ locals, stack, context_dummy, m.UndefinedConstant());
+ Node* call = m.CallJS0(function_node, receiver, context, state_node);
+ m.Return(call);
+
+ Stream s = m.Build(kAllExceptNopInstructions);
+
+ // Skip until kArchCallJSFunction.
+ size_t index = 0;
+ for (; index < s.size() && s[index]->arch_opcode() != kArchCallJSFunction;
+ index++) {
+ }
+ // Now we should have two instructions: call and return.
+ ASSERT_EQ(index + 2, s.size());
+
+ EXPECT_EQ(kArchCallJSFunction, s[index++]->arch_opcode());
+ EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+ // TODO(jarin) Check deoptimization table.
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged);
+
+ BailoutId bailout_id_before(42);
+
+ // Some arguments for the call node.
+ Node* function_node = m.Parameter(0);
+ Node* receiver = m.Parameter(1);
+ Node* context = m.Int32Constant(1); // Context is ignored.
+
+ // Build frame state for the state before the call.
+ Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+ Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
+ Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
+
+ Node* context_sentinel = m.Int32Constant(0);
+ Node* frame_state_before = m.NewNode(
+ m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
+ parameters, locals, stack, context_sentinel, m.UndefinedConstant());
+
+ // Build the call.
+ Node* call = m.CallFunctionStub0(function_node, receiver, context,
+ frame_state_before, CALL_AS_METHOD);
+
+ m.Return(call);
+
+ Stream s = m.Build(kAllExceptNopInstructions);
+
+ // Skip until kArchCallCodeObject.
+ size_t index = 0;
+ for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+ index++) {
+ }
+ // Now we should have two instructions: call, return.
+ ASSERT_EQ(index + 2, s.size());
+
+ // Check the call instruction
+ const Instruction* call_instr = s[index++];
+ EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+ size_t num_operands =
+ 1 + // Code object.
+ 1 +
+ 4 + // Frame state deopt id + one input for each value in frame state.
+ 1 + // Function.
+ 1; // Context.
+ ASSERT_EQ(num_operands, call_instr->InputCount());
+
+ // Code object.
+ EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+ // Deoptimization id.
+ int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+ FrameStateDescriptor* desc_before =
+ s.GetFrameStateDescriptor(deopt_id_before);
+ EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+ EXPECT_EQ(kPushOutput, desc_before->state_combine());
+ EXPECT_EQ(1u, desc_before->parameters_count());
+ EXPECT_EQ(1u, desc_before->locals_count());
+ EXPECT_EQ(1u, desc_before->stack_count());
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(2)));
+ EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(3)));
+ EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(4)));
+ EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(5)));
+
+ // Function.
+ EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(6)));
+ // Context.
+ EXPECT_EQ(context->id(), s.ToVreg(call_instr->InputAt(7)));
+
+ EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+ EXPECT_EQ(index, s.size());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest,
+ CallFunctionStubDeoptRecursiveFrameState) {
+ StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged);
+
+ BailoutId bailout_id_before(42);
+ BailoutId bailout_id_parent(62);
+
+ // Some arguments for the call node.
+ Node* function_node = m.Parameter(0);
+ Node* receiver = m.Parameter(1);
+ Node* context = m.Int32Constant(66);
+
+ // Build frame state for the state before the call.
+ Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
+ Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
+ Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
+ Node* frame_state_parent = m.NewNode(
+ m.common()->FrameState(JS_FRAME, bailout_id_parent, kIgnoreOutput),
+ parameters, locals, stack, context, m.UndefinedConstant());
+
+ Node* context2 = m.Int32Constant(46);
+ Node* parameters2 =
+ m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+ Node* locals2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
+ Node* stack2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
+ Node* frame_state_before = m.NewNode(
+ m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
+ parameters2, locals2, stack2, context2, frame_state_parent);
+
+ // Build the call.
+ Node* call = m.CallFunctionStub0(function_node, receiver, context2,
+ frame_state_before, CALL_AS_METHOD);
+
+ m.Return(call);
+
+ Stream s = m.Build(kAllExceptNopInstructions);
+
+ // Skip until kArchCallCodeObject.
+ size_t index = 0;
+ for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+ index++) {
+ }
+ // Now we should have two instructions: call and return.
+ EXPECT_EQ(index + 2, s.size());
+
+ // Check the call instruction
+ const Instruction* call_instr = s[index++];
+ EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+ size_t num_operands =
+ 1 + // Code object.
+ 1 + // Frame state deopt id
+ 4 + // One input for each value in frame state + context.
+ 4 + // One input for each value in the parent frame state + context.
+ 1 + // Function.
+ 1; // Context.
+ EXPECT_EQ(num_operands, call_instr->InputCount());
+ // Code object.
+ EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+ // Deoptimization id.
+ int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+ FrameStateDescriptor* desc_before =
+ s.GetFrameStateDescriptor(deopt_id_before);
+ EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+ EXPECT_EQ(1u, desc_before->parameters_count());
+ EXPECT_EQ(1u, desc_before->locals_count());
+ EXPECT_EQ(1u, desc_before->stack_count());
+ EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(2)));
+ // Context:
+ EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(3)));
+ EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(4)));
+ EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(5)));
+ // Values from parent environment should follow.
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(6)));
+ EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(7)));
+ EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(8)));
+ EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(9)));
+
+ // Function.
+ EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(10)));
+ // Context.
+ EXPECT_EQ(context2->id(), s.ToVreg(call_instr->InputAt(11)));
+ // Continuation.
+
+ EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+ EXPECT_EQ(index, s.size());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-selector-unittest.h b/deps/v8/src/compiler/instruction-selector-unittest.h
new file mode 100644
index 0000000000..3e7f63aa60
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-selector-unittest.h
@@ -0,0 +1,213 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionSelectorTest : public TestWithContext, public TestWithZone {
+ public:
+ InstructionSelectorTest();
+ virtual ~InstructionSelectorTest();
+
+ base::RandomNumberGenerator* rng() { return &rng_; }
+
+ class Stream;
+
+ enum StreamBuilderMode {
+ kAllInstructions,
+ kTargetInstructions,
+ kAllExceptNopInstructions
+ };
+
+ class StreamBuilder FINAL : public RawMachineAssembler {
+ public:
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
+ : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type)),
+ test_(test) {}
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+ MachineType parameter0_type)
+ : RawMachineAssembler(
+ new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type, parameter0_type)),
+ test_(test) {}
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+ MachineType parameter0_type, MachineType parameter1_type)
+ : RawMachineAssembler(
+ new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type, parameter0_type,
+ parameter1_type)),
+ test_(test) {}
+ StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+ MachineType parameter0_type, MachineType parameter1_type,
+ MachineType parameter2_type)
+ : RawMachineAssembler(
+ new (test->zone()) Graph(test->zone()),
+ MakeMachineSignature(test->zone(), return_type, parameter0_type,
+ parameter1_type, parameter2_type)),
+ test_(test) {}
+
+ Stream Build(CpuFeature feature) {
+ return Build(InstructionSelector::Features(feature));
+ }
+ Stream Build(CpuFeature feature1, CpuFeature feature2) {
+ return Build(InstructionSelector::Features(feature1, feature2));
+ }
+ Stream Build(StreamBuilderMode mode = kTargetInstructions) {
+ return Build(InstructionSelector::Features(), mode);
+ }
+ Stream Build(InstructionSelector::Features features,
+ StreamBuilderMode mode = kTargetInstructions);
+
+ private:
+ MachineSignature* MakeMachineSignature(Zone* zone,
+ MachineType return_type) {
+ MachineSignature::Builder builder(zone, 1, 0);
+ builder.AddReturn(return_type);
+ return builder.Build();
+ }
+
+ MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+ MachineType parameter0_type) {
+ MachineSignature::Builder builder(zone, 1, 1);
+ builder.AddReturn(return_type);
+ builder.AddParam(parameter0_type);
+ return builder.Build();
+ }
+
+ MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type) {
+ MachineSignature::Builder builder(zone, 1, 2);
+ builder.AddReturn(return_type);
+ builder.AddParam(parameter0_type);
+ builder.AddParam(parameter1_type);
+ return builder.Build();
+ }
+
+ MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type,
+ MachineType parameter2_type) {
+ MachineSignature::Builder builder(zone, 1, 3);
+ builder.AddReturn(return_type);
+ builder.AddParam(parameter0_type);
+ builder.AddParam(parameter1_type);
+ builder.AddParam(parameter2_type);
+ return builder.Build();
+ }
+
+ private:
+ InstructionSelectorTest* test_;
+ };
+
+ class Stream FINAL {
+ public:
+ size_t size() const { return instructions_.size(); }
+ const Instruction* operator[](size_t index) const {
+ EXPECT_LT(index, size());
+ return instructions_[index];
+ }
+
+ bool IsDouble(const InstructionOperand* operand) const {
+ return IsDouble(ToVreg(operand));
+ }
+ bool IsDouble(int virtual_register) const {
+ return doubles_.find(virtual_register) != doubles_.end();
+ }
+
+ bool IsInteger(const InstructionOperand* operand) const {
+ return IsInteger(ToVreg(operand));
+ }
+ bool IsInteger(int virtual_register) const {
+ return !IsDouble(virtual_register) && !IsReference(virtual_register);
+ }
+
+ bool IsReference(const InstructionOperand* operand) const {
+ return IsReference(ToVreg(operand));
+ }
+ bool IsReference(int virtual_register) const {
+ return references_.find(virtual_register) != references_.end();
+ }
+
+ float ToFloat32(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToFloat32();
+ }
+
+ int32_t ToInt32(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToInt32();
+ }
+
+ int64_t ToInt64(const InstructionOperand* operand) const {
+ return ToConstant(operand).ToInt64();
+ }
+
+ int ToVreg(const InstructionOperand* operand) const {
+ if (operand->IsConstant()) return operand->index();
+ EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
+ return UnallocatedOperand::cast(operand)->virtual_register();
+ }
+
+ FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
+ EXPECT_LT(deoptimization_id, GetFrameStateDescriptorCount());
+ return deoptimization_entries_[deoptimization_id];
+ }
+
+ int GetFrameStateDescriptorCount() {
+ return static_cast<int>(deoptimization_entries_.size());
+ }
+
+ private:
+ Constant ToConstant(const InstructionOperand* operand) const {
+ ConstantMap::const_iterator i;
+ if (operand->IsConstant()) {
+ i = constants_.find(operand->index());
+ EXPECT_FALSE(constants_.end() == i);
+ } else {
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+ i = immediates_.find(operand->index());
+ EXPECT_FALSE(immediates_.end() == i);
+ }
+ EXPECT_EQ(operand->index(), i->first);
+ return i->second;
+ }
+
+ friend class StreamBuilder;
+
+ typedef std::map<int, Constant> ConstantMap;
+
+ ConstantMap constants_;
+ ConstantMap immediates_;
+ std::deque<Instruction*> instructions_;
+ std::set<int> doubles_;
+ std::set<int> references_;
+ std::deque<FrameStateDescriptor*> deoptimization_entries_;
+ };
+
+ base::RandomNumberGenerator rng_;
+};
+
+
+template <typename T>
+class InstructionSelectorTestWithParam
+ : public InstructionSelectorTest,
+ public ::testing::WithParamInterface<T> {};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 541e0452fa..f36b07ea7e 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -21,9 +21,9 @@ InstructionSelector::InstructionSelector(InstructionSequence* sequence,
source_positions_(source_positions),
features_(features),
current_block_(NULL),
- instructions_(InstructionDeque::allocator_type(zone())),
- defined_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())),
- used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}
+ instructions_(zone()),
+ defined_(graph()->NodeCount(), false, zone()),
+ used_(graph()->NodeCount(), false, zone()) {}
void InstructionSelector::SelectInstructions() {
@@ -91,7 +91,7 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand** temps) {
size_t output_count = output == NULL ? 0 : 1;
InstructionOperand* inputs[] = {a, b};
- size_t input_count = ARRAY_SIZE(inputs);
+ size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
}
@@ -105,7 +105,7 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand** temps) {
size_t output_count = output == NULL ? 0 : 1;
InstructionOperand* inputs[] = {a, b, c};
- size_t input_count = ARRAY_SIZE(inputs);
+ size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
}
@@ -117,7 +117,7 @@ Instruction* InstructionSelector::Emit(
size_t temp_count, InstructionOperand** temps) {
size_t output_count = output == NULL ? 0 : 1;
InstructionOperand* inputs[] = {a, b, c, d};
- size_t input_count = ARRAY_SIZE(inputs);
+ size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
}
@@ -198,14 +198,6 @@ void InstructionSelector::MarkAsDouble(Node* node) {
DCHECK_NOT_NULL(node);
DCHECK(!IsReference(node));
sequence()->MarkAsDouble(node->id());
-
- // Propagate "doubleness" throughout phis.
- for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
- Node* user = *i;
- if (user->opcode() != IrOpcode::kPhi) continue;
- if (IsDouble(user)) continue;
- MarkAsDouble(user);
- }
}
@@ -219,40 +211,39 @@ void InstructionSelector::MarkAsReference(Node* node) {
DCHECK_NOT_NULL(node);
DCHECK(!IsDouble(node));
sequence()->MarkAsReference(node->id());
-
- // Propagate "referenceness" throughout phis.
- for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
- Node* user = *i;
- if (user->opcode() != IrOpcode::kPhi) continue;
- if (IsReference(user)) continue;
- MarkAsReference(user);
- }
}
void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
DCHECK_NOT_NULL(node);
- if (rep == kMachineFloat64) MarkAsDouble(node);
- if (rep == kMachineTagged) MarkAsReference(node);
+ switch (RepresentationOf(rep)) {
+ case kRepFloat32:
+ case kRepFloat64:
+ MarkAsDouble(node);
+ break;
+ case kRepTagged:
+ MarkAsReference(node);
+ break;
+ default:
+ break;
+ }
}
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
- : output_count(0),
- descriptor(d),
- output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
- outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
- fixed_and_control_args(
- zone->NewArray<InstructionOperand*>(input_count() + control_count())),
- fixed_count(0),
- pushed_nodes(zone->NewArray<Node*>(input_count())),
- pushed_count(0) {
- if (d->ReturnCount() > 1) {
- memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount()); // NOLINT
- }
- memset(pushed_nodes, 0, sizeof(Node*) * input_count()); // NOLINT
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
+ FrameStateDescriptor* frame_desc)
+ : descriptor(d),
+ frame_state_descriptor(frame_desc),
+ output_nodes(zone),
+ outputs(zone),
+ instruction_args(zone),
+ pushed_nodes(zone) {
+ output_nodes.reserve(d->ReturnCount());
+ outputs.reserve(d->ReturnCount());
+ pushed_nodes.reserve(input_count());
+ instruction_args.reserve(input_count() + frame_state_value_count());
}
@@ -260,96 +251,109 @@ CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_code_immediate,
- bool call_address_immediate,
- BasicBlock* cont_node,
- BasicBlock* deopt_node) {
+ bool call_address_immediate) {
OperandGenerator g(this);
DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
- buffer->input_count());
+ buffer->input_count() + buffer->frame_state_count());
if (buffer->descriptor->ReturnCount() > 0) {
// Collect the projections that represent multiple outputs from this call.
if (buffer->descriptor->ReturnCount() == 1) {
- buffer->output_nodes[0] = call;
+ buffer->output_nodes.push_back(call);
} else {
- call->CollectProjections(buffer->descriptor->ReturnCount(),
- buffer->output_nodes);
+ buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
+ call->CollectProjections(&buffer->output_nodes);
}
// Filter out the outputs that aren't live because no projection uses them.
- for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
+ for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
if (buffer->output_nodes[i] != NULL) {
Node* output = buffer->output_nodes[i];
- LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
- MarkAsRepresentation(location.representation(), output);
- buffer->outputs[buffer->output_count++] =
- g.DefineAsLocation(output, location);
+ MachineType type =
+ buffer->descriptor->GetReturnType(static_cast<int>(i));
+ LinkageLocation location =
+ buffer->descriptor->GetReturnLocation(static_cast<int>(i));
+ MarkAsRepresentation(type, output);
+ buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
}
}
}
- buffer->fixed_count = 1; // First argument is always the callee.
+ // The first argument is always the callee code.
Node* callee = call->InputAt(0);
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
- buffer->fixed_and_control_args[0] =
+ buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
- : g.UseRegister(callee);
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
- buffer->fixed_and_control_args[0] =
+ buffer->instruction_args.push_back(
(call_address_immediate &&
(callee->opcode() == IrOpcode::kInt32Constant ||
callee->opcode() == IrOpcode::kInt64Constant))
? g.UseImmediate(callee)
- : g.UseRegister(callee);
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
- buffer->fixed_and_control_args[0] =
- g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
+ buffer->instruction_args.push_back(
+ g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
+ buffer->descriptor->GetInputType(0)));
break;
}
+ DCHECK_EQ(1, buffer->instruction_args.size());
+
+ // If the call needs a frame state, we insert the state information as
+ // follows (n is the number of value inputs to the frame state):
+ // arg 1 : deoptimization id.
+ // arg 2 - arg (n + 1) : value inputs to the frame state.
+ if (buffer->frame_state_descriptor != NULL) {
+ InstructionSequence::StateId state_id =
+ sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
+ buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
+
+ Node* frame_state =
+ call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+ AddFrameStateInputs(frame_state, &buffer->instruction_args,
+ buffer->frame_state_descriptor);
+ }
+ DCHECK(1 + buffer->frame_state_value_count() ==
+ buffer->instruction_args.size());
- int input_count = buffer->input_count();
+ size_t input_count = static_cast<size_t>(buffer->input_count());
- // Split the arguments into pushed_nodes and fixed_args. Pushed arguments
- // require an explicit push instruction before the call and do not appear
- // as arguments to the call. Everything else ends up as an InstructionOperand
- // argument to the call.
+ // Split the arguments into pushed_nodes and instruction_args. Pushed
+ // arguments require an explicit push instruction before the call and do
+ // not appear as arguments to the call. Everything else ends up
+ // as an InstructionOperand argument to the call.
InputIter iter(call->inputs().begin());
- for (int index = 0; index < input_count; ++iter, ++index) {
+ int pushed_count = 0;
+ for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
- DCHECK(index == iter.index());
+ DCHECK(index == static_cast<size_t>(iter.index()));
+ DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
InstructionOperand* op =
- g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
+ g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
+ buffer->descriptor->GetInputType(index));
if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
- DCHECK(buffer->pushed_nodes[stack_index] == NULL);
+ if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
+ buffer->pushed_nodes.resize(stack_index + 1, NULL);
+ }
+ DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
buffer->pushed_nodes[stack_index] = *iter;
- buffer->pushed_count++;
+ pushed_count++;
} else {
- buffer->fixed_and_control_args[buffer->fixed_count] = op;
- buffer->fixed_count++;
+ buffer->instruction_args.push_back(op);
}
}
-
- // If the call can deoptimize, we add the continuation and deoptimization
- // block labels.
- if (buffer->descriptor->CanLazilyDeoptimize()) {
- DCHECK(cont_node != NULL);
- DCHECK(deopt_node != NULL);
- buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
- buffer->fixed_and_control_args[buffer->fixed_count + 1] =
- g.Label(deopt_node);
- } else {
- DCHECK(cont_node == NULL);
- DCHECK(deopt_node == NULL);
- }
-
- DCHECK(input_count == (buffer->fixed_count + buffer->pushed_count));
+ CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
+ DCHECK(static_cast<size_t>(input_count) ==
+ (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
+ buffer->frame_state_value_count()));
}
@@ -422,14 +426,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlockData::kThrow:
return VisitThrow(input);
- case BasicBlockData::kDeoptimize:
- return VisitDeoptimize(input);
- case BasicBlockData::kCall: {
- BasicBlock* deoptimization = block->SuccessorAt(0);
- BasicBlock* continuation = block->SuccessorAt(1);
- VisitCall(input, continuation, deoptimization);
- break;
- }
case BasicBlockData::kNone: {
// TODO(titzer): exit block doesn't have control.
DCHECK(input == NULL);
@@ -460,27 +456,28 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kIfFalse:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
- case IrOpcode::kLazyDeoptimization:
- case IrOpcode::kContinuation:
// No code needed for these graph artifacts.
return;
+ case IrOpcode::kFinish:
+ return MarkAsReference(node), VisitFinish(node);
case IrOpcode::kParameter: {
- int index = OpParameter<int>(node);
- MachineType rep = linkage()
- ->GetIncomingDescriptor()
- ->GetInputLocation(index)
- .representation();
- MarkAsRepresentation(rep, node);
+ MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
+ MarkAsRepresentation(type, node);
return VisitParameter(node);
}
- case IrOpcode::kPhi:
+ case IrOpcode::kPhi: {
+ MachineType type = OpParameter<MachineType>(node);
+ MarkAsRepresentation(type, node);
return VisitPhi(node);
+ }
case IrOpcode::kProjection:
return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
return VisitConstant(node);
+ case IrOpcode::kFloat32Constant:
+ return MarkAsDouble(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
return MarkAsDouble(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
@@ -493,8 +490,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
return;
case IrOpcode::kLoad: {
- MachineType load_rep = OpParameter<MachineType>(node);
- MarkAsRepresentation(load_rep, node);
+ LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
+ MarkAsRepresentation(rep, node);
return VisitLoad(node);
}
case IrOpcode::kStore:
@@ -511,6 +508,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
return VisitWord32Sar(node);
+ case IrOpcode::kWord32Ror:
+ return VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
case IrOpcode::kWord64And:
@@ -525,6 +524,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
return VisitWord64Sar(node);
+ case IrOpcode::kWord64Ror:
+ return VisitWord64Ror(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
@@ -571,10 +572,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
- case IrOpcode::kConvertInt32ToInt64:
- return VisitConvertInt32ToInt64(node);
- case IrOpcode::kConvertInt64ToInt32:
- return VisitConvertInt64ToInt32(node);
+ case IrOpcode::kChangeFloat32ToFloat64:
+ return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
@@ -583,6 +582,16 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kChangeInt32ToInt64:
+ return VisitChangeInt32ToInt64(node);
+ case IrOpcode::kChangeUint32ToUint64:
+ return VisitChangeUint32ToUint64(node);
+ case IrOpcode::kTruncateFloat64ToFloat32:
+ return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
+ case IrOpcode::kTruncateFloat64ToInt32:
+ return VisitTruncateFloat64ToInt32(node);
+ case IrOpcode::kTruncateInt64ToInt32:
+ return VisitTruncateInt64ToInt32(node);
case IrOpcode::kFloat64Add:
return MarkAsDouble(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
@@ -593,6 +602,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsDouble(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsDouble(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Sqrt:
+ return MarkAsDouble(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
@@ -684,6 +695,13 @@ void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat64Compare(node, &cont);
@@ -724,6 +742,9 @@ void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+
+
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
@@ -745,12 +766,17 @@ void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
@@ -774,10 +800,19 @@ void InstructionSelector::VisitWord64Compare(Node* node,
#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+void InstructionSelector::VisitFinish(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
- Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
- OpParameter<int>(node))));
+ int index = OpParameter<int>(node);
+ Emit(kArchNop,
+ g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterType(index)));
}
@@ -795,10 +830,10 @@ void InstructionSelector::VisitProjection(Node* node) {
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
- if (OpParameter<int32_t>(node) == 0) {
+ if (OpParameter<size_t>(node) == 0) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
- DCHECK_EQ(1, OpParameter<int32_t>(node));
+ DCHECK(OpParameter<size_t>(node) == 1u);
MarkAsUsed(value);
}
break;
@@ -906,7 +941,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<int32_t>(value) == 1) {
+ if (OpParameter<size_t>(value) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
@@ -941,7 +976,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitReturn(Node* value) {
OperandGenerator g(this);
if (value != NULL) {
- Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
+ Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
+ linkage()->GetReturnType()));
} else {
Emit(kArchRet, NULL);
}
@@ -953,6 +989,26 @@ void InstructionSelector::VisitThrow(Node* value) {
}
+FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
+ Node* state) {
+ DCHECK(state->opcode() == IrOpcode::kFrameState);
+ DCHECK_EQ(5, state->InputCount());
+ FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
+ int parameters = OpParameter<int>(state->InputAt(0));
+ int locals = OpParameter<int>(state->InputAt(1));
+ int stack = OpParameter<int>(state->InputAt(2));
+
+ FrameStateDescriptor* outer_state = NULL;
+ Node* outer_node = state->InputAt(4);
+ if (outer_node->opcode() == IrOpcode::kFrameState) {
+ outer_state = GetFrameStateDescriptor(outer_node);
+ }
+
+ return new (instruction_zone())
+ FrameStateDescriptor(state_info, parameters, locals, stack, outer_state);
+}
+
+
static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
@@ -961,48 +1017,46 @@ static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
default:
- return g->Use(input);
+ return g->UseUnique(input);
}
}
-void InstructionSelector::VisitDeoptimize(Node* deopt) {
- DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize);
- Node* state = deopt->InputAt(0);
- DCHECK(state->op()->opcode() == IrOpcode::kFrameState);
- BailoutId ast_id = OpParameter<BailoutId>(state);
+void InstructionSelector::AddFrameStateInputs(
+ Node* state, InstructionOperandVector* inputs,
+ FrameStateDescriptor* descriptor) {
+ DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
- // Add the inputs.
- Node* parameters = state->InputAt(0);
- int parameters_count = OpParameter<int>(parameters);
+ if (descriptor->outer_state() != NULL) {
+ AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
+ }
+ Node* parameters = state->InputAt(0);
Node* locals = state->InputAt(1);
- int locals_count = OpParameter<int>(locals);
-
Node* stack = state->InputAt(2);
- int stack_count = OpParameter<int>(stack);
+ Node* context = state->InputAt(3);
+
+ DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
+ DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
+ DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
+
+ DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
+ DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
+ DCHECK_EQ(descriptor->stack_count(), stack->InputCount());
OperandGenerator g(this);
- std::vector<InstructionOperand*> inputs;
- inputs.reserve(parameters_count + locals_count + stack_count);
- for (int i = 0; i < parameters_count; i++) {
- inputs.push_back(UseOrImmediate(&g, parameters->InputAt(i)));
+ for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
+ inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
}
- for (int i = 0; i < locals_count; i++) {
- inputs.push_back(UseOrImmediate(&g, locals->InputAt(i)));
+ if (descriptor->HasContext()) {
+ inputs->push_back(UseOrImmediate(&g, context));
}
- for (int i = 0; i < stack_count; i++) {
- inputs.push_back(UseOrImmediate(&g, stack->InputAt(i)));
+ for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
+ inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
+ }
+ for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
+ inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
}
-
- FrameStateDescriptor* descriptor = new (instruction_zone())
- FrameStateDescriptor(ast_id, parameters_count, locals_count, stack_count);
-
- DCHECK_EQ(descriptor->size(), inputs.size());
-
- int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
- Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), 0, NULL,
- inputs.size(), &inputs.front(), 0, NULL);
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index e283322846..264f737ba7 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -20,7 +20,7 @@ namespace compiler {
struct CallBuffer; // TODO(bmeurer): Remove this.
class FlagsContinuation;
-class InstructionSelector V8_FINAL {
+class InstructionSelector FINAL {
public:
// Forward declarations.
class Features;
@@ -62,7 +62,7 @@ class InstructionSelector V8_FINAL {
// ============== Architecture-independent CPU feature methods. ==============
// ===========================================================================
- class Features V8_FINAL {
+ class Features FINAL {
public:
Features() : bits_(0) {}
explicit Features(unsigned bits) : bits_(bits) {}
@@ -84,6 +84,9 @@ class InstructionSelector V8_FINAL {
return Features(CpuFeatures::SupportedFeatures());
}
+ // Checks if {node} is currently live.
+ bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
+
private:
friend class OperandGenerator;
@@ -139,8 +142,11 @@ class InstructionSelector V8_FINAL {
// {call_address_immediate} to generate immediate operands to address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_code_immediate,
- bool call_address_immediate, BasicBlock* cont_node,
- BasicBlock* deopt_node);
+ bool call_address_immediate);
+
+ FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+ void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
+ FrameStateDescriptor* descriptor);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
@@ -169,6 +175,7 @@ class InstructionSelector V8_FINAL {
void VisitWord64Compare(Node* node, FlagsContinuation* cont);
void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+ void VisitFinish(Node* node);
void VisitParameter(Node* node);
void VisitPhi(Node* node);
void VisitProjection(Node* node);
@@ -192,15 +199,12 @@ class InstructionSelector V8_FINAL {
// ===========================================================================
- typedef zone_allocator<Instruction*> InstructionPtrZoneAllocator;
- typedef std::deque<Instruction*, InstructionPtrZoneAllocator> Instructions;
-
Zone zone_;
InstructionSequence* sequence_;
SourcePositionTable* source_positions_;
Features features_;
BasicBlock* current_block_;
- Instructions instructions_;
+ ZoneDeque<Instruction*> instructions_;
BoolVector defined_;
BoolVector used_;
};
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index a2f4ed4f47..35232807e6 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -75,6 +75,7 @@ void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
delete[] cache;
+ cache = NULL;
}
@@ -299,6 +300,8 @@ OStream& operator<<(OStream& os, const Constant& constant) {
return os << constant.ToInt32();
case Constant::kInt64:
return os << constant.ToInt64() << "l";
+ case Constant::kFloat32:
+ return os << constant.ToFloat32() << "f";
case Constant::kFloat64:
return os << constant.ToFloat64();
case Constant::kExternalReference:
@@ -393,20 +396,20 @@ void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
}
-int InstructionSequence::AddDeoptimizationEntry(
+InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
FrameStateDescriptor* descriptor) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
deoptimization_entries_.push_back(descriptor);
- return deoptimization_id;
+ return StateId::FromInt(deoptimization_id);
}
-FrameStateDescriptor* InstructionSequence::GetDeoptimizationEntry(
- int deoptimization_id) {
- return deoptimization_entries_[deoptimization_id];
+FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
+ InstructionSequence::StateId state_id) {
+ return deoptimization_entries_[state_id.ToInt()];
}
-int InstructionSequence::GetDeoptimizationEntryCount() {
+int InstructionSequence::GetFrameStateDescriptorCount() {
return static_cast<int>(deoptimization_entries_.size());
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 7b357639ef..f8ad55e0e6 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -9,14 +9,14 @@
#include <map>
#include <set>
-// TODO(titzer): don't include the assembler?
-#include "src/assembler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
+// TODO(titzer): don't include the macro-assembler?
+#include "src/macro-assembler.h"
#include "src/zone-allocator.h"
namespace v8 {
@@ -89,6 +89,8 @@ class InstructionOperand : public ZoneObject {
unsigned value_;
};
+typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
+
OStream& operator<<(OStream& os, const InstructionOperand& op);
class UnallocatedOperand : public InstructionOperand {
@@ -265,7 +267,7 @@ class UnallocatedOperand : public InstructionOperand {
};
-class MoveOperands V8_FINAL {
+class MoveOperands FINAL {
public:
MoveOperands(InstructionOperand* source, InstructionOperand* destination)
: source_(source), destination_(destination) {}
@@ -311,7 +313,7 @@ class MoveOperands V8_FINAL {
OStream& operator<<(OStream& os, const MoveOperands& mo);
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-class SubKindOperand V8_FINAL : public InstructionOperand {
+class SubKindOperand FINAL : public InstructionOperand {
public:
static SubKindOperand* Create(int index, Zone* zone) {
DCHECK(index >= 0);
@@ -342,7 +344,7 @@ INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
-class ParallelMove V8_FINAL : public ZoneObject {
+class ParallelMove FINAL : public ZoneObject {
public:
explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
@@ -363,7 +365,7 @@ class ParallelMove V8_FINAL : public ZoneObject {
OStream& operator<<(OStream& os, const ParallelMove& pm);
-class PointerMap V8_FINAL : public ZoneObject {
+class PointerMap FINAL : public ZoneObject {
public:
explicit PointerMap(Zone* zone)
: pointer_operands_(8, zone),
@@ -402,12 +404,14 @@ OStream& operator<<(OStream& os, const PointerMap& pm);
class Instruction : public ZoneObject {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
- InstructionOperand* Output() const { return OutputAt(0); }
InstructionOperand* OutputAt(size_t i) const {
DCHECK(i < OutputCount());
return operands_[i];
}
+ bool HasOutput() const { return OutputCount() == 1; }
+ InstructionOperand* Output() const { return OutputAt(0); }
+
size_t InputCount() const { return InputCountField::decode(bit_field_); }
InstructionOperand* InputAt(size_t i) const {
DCHECK(i < InputCount());
@@ -593,7 +597,7 @@ class GapInstruction : public Instruction {
// This special kind of gap move instruction represents the beginning of a
// block of code.
// TODO(titzer): move code_start and code_end from BasicBlock to here.
-class BlockStartInstruction V8_FINAL : public GapInstruction {
+class BlockStartInstruction FINAL : public GapInstruction {
public:
BasicBlock* block() const { return block_; }
Label* label() { return &label_; }
@@ -617,7 +621,7 @@ class BlockStartInstruction V8_FINAL : public GapInstruction {
};
-class SourcePositionInstruction V8_FINAL : public Instruction {
+class SourcePositionInstruction FINAL : public Instruction {
public:
static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
void* buffer = zone->New(sizeof(SourcePositionInstruction));
@@ -648,17 +652,25 @@ class SourcePositionInstruction V8_FINAL : public Instruction {
};
-class Constant V8_FINAL {
+class Constant FINAL {
public:
- enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+ enum Type {
+ kInt32,
+ kInt64,
+ kFloat32,
+ kFloat64,
+ kExternalReference,
+ kHeapObject
+ };
explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
- explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {}
+ explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
+ explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
- : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {}
+ : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
explicit Constant(Handle<HeapObject> obj)
- : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {}
+ : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
Type type() const { return type_; }
@@ -673,20 +685,25 @@ class Constant V8_FINAL {
return value_;
}
+ float ToFloat32() const {
+ DCHECK_EQ(kFloat32, type());
+ return bit_cast<float>(static_cast<int32_t>(value_));
+ }
+
double ToFloat64() const {
if (type() == kInt32) return ToInt32();
DCHECK_EQ(kFloat64, type());
- return BitCast<double>(value_);
+ return bit_cast<double>(value_);
}
ExternalReference ToExternalReference() const {
DCHECK_EQ(kExternalReference, type());
- return BitCast<ExternalReference>(static_cast<intptr_t>(value_));
+ return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
}
Handle<HeapObject> ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
- return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+ return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
}
private:
@@ -697,46 +714,101 @@ class Constant V8_FINAL {
class FrameStateDescriptor : public ZoneObject {
public:
- FrameStateDescriptor(BailoutId bailout_id, int parameters_count,
- int locals_count, int stack_count)
- : bailout_id_(bailout_id),
+ FrameStateDescriptor(const FrameStateCallInfo& state_info,
+ size_t parameters_count, size_t locals_count,
+ size_t stack_count,
+ FrameStateDescriptor* outer_state = NULL)
+ : type_(state_info.type()),
+ bailout_id_(state_info.bailout_id()),
+ frame_state_combine_(state_info.state_combine()),
parameters_count_(parameters_count),
locals_count_(locals_count),
- stack_count_(stack_count) {}
+ stack_count_(stack_count),
+ outer_state_(outer_state),
+ jsfunction_(state_info.jsfunction()) {}
+ FrameStateType type() const { return type_; }
BailoutId bailout_id() const { return bailout_id_; }
- int parameters_count() { return parameters_count_; }
- int locals_count() { return locals_count_; }
- int stack_count() { return stack_count_; }
+ OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+ size_t parameters_count() const { return parameters_count_; }
+ size_t locals_count() const { return locals_count_; }
+ size_t stack_count() const { return stack_count_; }
+ FrameStateDescriptor* outer_state() const { return outer_state_; }
+ MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
+
+ size_t size() const {
+ return parameters_count_ + locals_count_ + stack_count_ +
+ (HasContext() ? 1 : 0);
+ }
+
+ size_t GetTotalSize() const {
+ size_t total_size = 0;
+ for (const FrameStateDescriptor* iter = this; iter != NULL;
+ iter = iter->outer_state_) {
+ total_size += iter->size();
+ }
+ return total_size;
+ }
- int size() { return parameters_count_ + locals_count_ + stack_count_; }
+ size_t GetHeight(OutputFrameStateCombine override) const {
+ size_t height = size() - parameters_count();
+ switch (override) {
+ case kPushOutput:
+ ++height;
+ break;
+ case kIgnoreOutput:
+ break;
+ }
+ return height;
+ }
+
+ size_t GetFrameCount() const {
+ size_t count = 0;
+ for (const FrameStateDescriptor* iter = this; iter != NULL;
+ iter = iter->outer_state_) {
+ ++count;
+ }
+ return count;
+ }
+
+ size_t GetJSFrameCount() const {
+ size_t count = 0;
+ for (const FrameStateDescriptor* iter = this; iter != NULL;
+ iter = iter->outer_state_) {
+ if (iter->type_ == JS_FRAME) {
+ ++count;
+ }
+ }
+ return count;
+ }
+
+ bool HasContext() const { return type_ == JS_FRAME; }
private:
+ FrameStateType type_;
BailoutId bailout_id_;
- int parameters_count_;
- int locals_count_;
- int stack_count_;
+ OutputFrameStateCombine frame_state_combine_;
+ size_t parameters_count_;
+ size_t locals_count_;
+ size_t stack_count_;
+ FrameStateDescriptor* outer_state_;
+ MaybeHandle<JSFunction> jsfunction_;
};
OStream& operator<<(OStream& os, const Constant& constant);
-typedef std::deque<Constant, zone_allocator<Constant> > ConstantDeque;
+typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
zone_allocator<std::pair<int, Constant> > > ConstantMap;
-
-typedef std::deque<Instruction*, zone_allocator<Instruction*> >
- InstructionDeque;
-typedef std::deque<PointerMap*, zone_allocator<PointerMap*> > PointerMapDeque;
-typedef std::vector<FrameStateDescriptor*,
- zone_allocator<FrameStateDescriptor*> >
- DeoptimizationVector;
-
+typedef ZoneDeque<Instruction*> InstructionDeque;
+typedef ZoneDeque<PointerMap*> PointerMapDeque;
+typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
// Represents architecture-specific generated code before, during, and after
// register allocation.
// TODO(titzer): s/IsDouble/IsFloat64/
-class InstructionSequence V8_FINAL {
+class InstructionSequence FINAL {
public:
InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
: graph_(graph),
@@ -744,14 +816,14 @@ class InstructionSequence V8_FINAL {
schedule_(schedule),
constants_(ConstantMap::key_compare(),
ConstantMap::allocator_type(zone())),
- immediates_(ConstantDeque::allocator_type(zone())),
- instructions_(InstructionDeque::allocator_type(zone())),
+ immediates_(zone()),
+ instructions_(zone()),
next_virtual_register_(graph->NodeCount()),
- pointer_maps_(PointerMapDeque::allocator_type(zone())),
+ pointer_maps_(zone()),
doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
references_(std::less<int>(),
VirtualRegisterSet::allocator_type(zone())),
- deoptimization_entries_(DeoptimizationVector::allocator_type(zone())) {}
+ deoptimization_entries_(zone()) {}
int NextVirtualRegister() { return next_virtual_register_++; }
int VirtualRegisterCount() const { return next_virtual_register_; }
@@ -839,9 +911,19 @@ class InstructionSequence V8_FINAL {
return immediates_[index];
}
- int AddDeoptimizationEntry(FrameStateDescriptor* descriptor);
- FrameStateDescriptor* GetDeoptimizationEntry(int deoptimization_id);
- int GetDeoptimizationEntryCount();
+ class StateId {
+ public:
+ static StateId FromInt(int id) { return StateId(id); }
+ int ToInt() const { return id_; }
+
+ private:
+ explicit StateId(int id) : id_(id) {}
+ int id_;
+ };
+
+ StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
+ FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
+ int GetFrameStateDescriptorCount();
private:
friend OStream& operator<<(OStream& os, const InstructionSequence& code);
diff --git a/deps/v8/src/compiler/js-builtin-reducer-unittest.cc b/deps/v8/src/compiler/js-builtin-reducer-unittest.cc
new file mode 100644
index 0000000000..5177d8d3eb
--- /dev/null
+++ b/deps/v8/src/compiler/js-builtin-reducer-unittest.cc
@@ -0,0 +1,236 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducerTest : public GraphTest {
+ public:
+ JSBuiltinReducerTest() : javascript_(zone()) {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ Typer typer(zone());
+ MachineOperatorBuilder machine;
+ JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
+ JSBuiltinReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ Node* Parameter(Type* t, int32_t index = 0) {
+ Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+ NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+ return n;
+ }
+
+ Node* UndefinedConstant() {
+ return HeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+namespace {
+
+// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
+Type* const kNumberTypes[] = {
+ Type::UnsignedSmall(), Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+ Type::OtherUnsigned32(), Type::OtherSigned32(), Type::SignedSmall(),
+ Type::Signed32(), Type::Unsigned32(), Type::Integral32(),
+ Type::MinusZero(), Type::NaN(), Type::OtherNumber(),
+ Type::OrderedNumber(), Type::Number()};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Math.abs
+
+
+TEST_F(JSBuiltinReducerTest, MathAbs) {
+ Handle<JSFunction> f(isolate()->context()->math_abs_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Unsigned32())) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ } else {
+ Capture<Node*> branch;
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(kMachNone, p0, IsNumberSubtract(IsNumberConstant(0), p0),
+ IsMerge(IsIfTrue(CaptureEq(&branch)),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsNumberLessThan(IsNumberConstant(0), p0),
+ graph()->start()))))));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+
+TEST_F(JSBuiltinReducerTest, MathSqrt) {
+ Handle<JSFunction> f(isolate()->context()->math_sqrt_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+
+TEST_F(JSBuiltinReducerTest, MathMax0) {
+ Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(javascript()->Call(2, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant());
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax1) {
+ Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax2) {
+ Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+ UndefinedConstant(), p0, p1);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+ Capture<Node*> branch;
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(kMachNone, p1, p0,
+ IsMerge(IsIfTrue(CaptureEq(&branch)),
+ IsIfFalse(AllOf(CaptureEq(&branch),
+ IsBranch(IsNumberLessThan(p0, p1),
+ graph()->start()))))));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+
+TEST_F(JSBuiltinReducerTest, MathImul) {
+ Handle<JSFunction> f(isolate()->context()->math_imul_fun());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call =
+ graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+ UndefinedConstant(), p0, p1);
+ Reduction r = Reduce(call);
+
+ if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.fround
+
+
+TEST_F(JSBuiltinReducerTest, MathFround) {
+ Handle<Object> m =
+ JSObject::GetProperty(isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked(
+ "Math")).ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ JSObject::GetProperty(m, isolate()->factory()->NewStringFromAsciiChecked(
+ "fround")).ToHandleChecked());
+
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+ Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+ fun, UndefinedConstant(), p0);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
new file mode 100644
index 0000000000..ec73742fd2
--- /dev/null
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -0,0 +1,218 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Helper method that assumes replacement nodes are pure values that don't
+// produce an effect. Replaces {node} with {reduction} and relaxes effects.
+static Reduction ReplaceWithPureReduction(Node* node, Reduction reduction) {
+ if (reduction.Changed()) {
+ NodeProperties::ReplaceWithValue(node, reduction.replacement());
+ return reduction;
+ }
+ return Reducer::NoChange();
+}
+
+
+// Helper class to access JSCallFunction nodes that are potential candidates
+// for reduction when they have a BuiltinFunctionId associated with them.
+class JSCallReduction {
+ public:
+ explicit JSCallReduction(Node* node) : node_(node) {}
+
+ // Determines whether the node is a JSCallFunction operation that targets a
+ // constant callee being a well-known builtin with a BuiltinFunctionId.
+ bool HasBuiltinFunctionId() {
+ if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
+ HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+ if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ return function->shared()->HasBuiltinFunctionId();
+ }
+
+ // Retrieves the BuiltinFunctionId as described above.
+ BuiltinFunctionId GetBuiltinFunctionId() {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+ HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ return function->shared()->builtin_function_id();
+ }
+
+ // Determines whether the call takes zero inputs.
+ bool InputsMatchZero() { return GetJSCallArity() == 0; }
+
+ // Determines whether the call takes one input of the given type.
+ bool InputsMatchOne(Type* t1) {
+ return GetJSCallArity() == 1 &&
+ NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
+ }
+
+ // Determines whether the call takes two inputs of the given types.
+ bool InputsMatchTwo(Type* t1, Type* t2) {
+ return GetJSCallArity() == 2 &&
+ NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
+ NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
+ }
+
+ // Determines whether the call takes inputs all of the given type.
+ bool InputsMatchAll(Type* t) {
+ for (int i = 0; i < GetJSCallArity(); i++) {
+ if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ Node* left() { return GetJSCallInput(0); }
+ Node* right() { return GetJSCallInput(1); }
+
+ int GetJSCallArity() {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+ // Skip first (i.e. callee) and second (i.e. receiver) operand.
+ return OperatorProperties::GetValueInputCount(node_->op()) - 2;
+ }
+
+ Node* GetJSCallInput(int index) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+ DCHECK_LT(index, GetJSCallArity());
+ // Skip first (i.e. callee) and second (i.e. receiver) operand.
+ return NodeProperties::GetValueInput(node_, index + 2);
+ }
+
+ private:
+ Node* node_;
+};
+
+
+// ECMA-262, section 15.8.2.1.
+Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Unsigned32())) {
+ // Math.abs(a:uint32) -> a
+ return Replace(r.left());
+ }
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.abs(a:number) -> (a > 0 ? a : 0 - a)
+ Node* value = r.left();
+ Node* zero = jsgraph()->ZeroConstant();
+ Node* control = graph()->start();
+ Node* tag = graph()->NewNode(simplified()->NumberLessThan(), zero, value);
+
+ Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+ Node* neg = graph()->NewNode(simplified()->NumberSubtract(), zero, value);
+ value = graph()->NewNode(common()->Phi(kMachNone, 2), value, neg, merge);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+// ECMA-262, section 15.8.2.17.
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.sqrt(a:number) -> Float64Sqrt(a)
+ Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+// ECMA-262, section 15.8.2.11.
+Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchZero()) {
+ // Math.max() -> -Infinity
+ return Replace(jsgraph()->Constant(-V8_INFINITY));
+ }
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.max(a:number) -> a
+ return Replace(r.left());
+ }
+ if (r.InputsMatchAll(Type::Integral32())) {
+ // Math.max(a:int32, b:int32, ...)
+ Node* value = r.GetJSCallInput(0);
+ for (int i = 1; i < r.GetJSCallArity(); i++) {
+ Node* p = r.GetJSCallInput(i);
+ Node* control = graph()->start();
+ Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
+
+ Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+ value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+ }
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+// ES6 draft 08-24-14, section 20.2.2.19.
+Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
+ // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
+ Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+// ES6 draft 08-24-14, section 20.2.2.17.
+Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
+ Node* value =
+ graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+Reduction JSBuiltinReducer::Reduce(Node* node) {
+ JSCallReduction r(node);
+
+ // Dispatch according to the BuiltinFunctionId if present.
+ if (!r.HasBuiltinFunctionId()) return NoChange();
+ switch (r.GetBuiltinFunctionId()) {
+ case kMathAbs:
+ return ReplaceWithPureReduction(node, ReduceMathAbs(node));
+ case kMathSqrt:
+ return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
+ case kMathMax:
+ return ReplaceWithPureReduction(node, ReduceMathMax(node));
+ case kMathImul:
+ return ReplaceWithPureReduction(node, ReduceMathImul(node));
+ case kMathFround:
+ return ReplaceWithPureReduction(node, ReduceMathFround(node));
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
new file mode 100644
index 0000000000..f3b862f527
--- /dev/null
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -0,0 +1,47 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
+#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducer FINAL : public Reducer {
+ public:
+ explicit JSBuiltinReducer(JSGraph* jsgraph)
+ : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+ virtual ~JSBuiltinReducer() {}
+
+ virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Graph* graph() const { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ Reduction ReduceMathAbs(Node* node);
+ Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceMathMax(Node* node);
+ Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathFround(Node* node);
+
+ JSGraph* jsgraph_;
+ SimplifiedOperatorBuilder simplified_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_BUILTIN_REDUCER_H_
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index bdf142763a..cd8932b298 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -15,26 +15,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-// TODO(titzer): factor this out to a common routine with js-typed-lowering.
-static void ReplaceEffectfulWithValue(Node* node, Node* value) {
- Node* effect = NULL;
- if (OperatorProperties::HasEffectInput(node->op())) {
- effect = NodeProperties::GetEffectInput(node);
- }
-
- // Requires distinguishing between value and effect edges.
- UseIter iter = node->uses().begin();
- while (iter != node->uses().end()) {
- if (NodeProperties::IsEffectEdge(iter.edge())) {
- DCHECK_NE(NULL, effect);
- iter = iter.UpdateToAndIncrement(effect);
- } else {
- iter = iter.UpdateToAndIncrement(value);
- }
- }
-}
-
-
class ContextSpecializationVisitor : public NullNodeVisitor {
public:
explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
@@ -45,14 +25,16 @@ class ContextSpecializationVisitor : public NullNodeVisitor {
case IrOpcode::kJSLoadContext: {
Reduction r = spec_->ReduceJSLoadContext(node);
if (r.Changed() && r.replacement() != node) {
- ReplaceEffectfulWithValue(node, r.replacement());
+ NodeProperties::ReplaceWithValue(node, r.replacement());
+ node->RemoveAllInputs();
}
break;
}
case IrOpcode::kJSStoreContext: {
Reduction r = spec_->ReduceJSStoreContext(node);
if (r.Changed() && r.replacement() != node) {
- ReplaceEffectfulWithValue(node, r.replacement());
+ NodeProperties::ReplaceWithValue(node, r.replacement());
+ node->RemoveAllInputs();
}
break;
}
@@ -68,7 +50,8 @@ class ContextSpecializationVisitor : public NullNodeVisitor {
void JSContextSpecializer::SpecializeToContext() {
- ReplaceEffectfulWithValue(context_, jsgraph_->Constant(info_->context()));
+ NodeProperties::ReplaceWithValue(context_,
+ jsgraph_->Constant(info_->context()));
ContextSpecializationVisitor visitor(this);
jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
@@ -78,16 +61,16 @@ void JSContextSpecializer::SpecializeToContext() {
Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
- ValueMatcher<Handle<Context> > match(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
// If the context is not constant, no reduction can occur.
- if (!match.HasValue()) {
+ if (!m.HasValue()) {
return Reducer::NoChange();
}
ContextAccess access = OpParameter<ContextAccess>(node);
// Find the right parent context.
- Context* context = *match.Value();
+ Context* context = *m.Value().handle();
for (int i = access.depth(); i > 0; --i) {
context = context->previous();
}
@@ -98,8 +81,8 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
if (access.depth() == 0) {
return Reducer::NoChange();
}
- Operator* op = jsgraph_->javascript()->LoadContext(0, access.index(),
- access.immutable());
+ const Operator* op = jsgraph_->javascript()->LoadContext(
+ 0, access.index(), access.immutable());
node->set_op(op);
Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
node->ReplaceInput(0, jsgraph_->Constant(context_handle));
@@ -126,9 +109,9 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
- ValueMatcher<Handle<Context> > match(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
// If the context is not constant, no reduction can occur.
- if (!match.HasValue()) {
+ if (!m.HasValue()) {
return Reducer::NoChange();
}
@@ -140,18 +123,19 @@ Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
}
// Find the right parent context.
- Context* context = *match.Value();
+ Context* context = *m.Value().handle();
for (int i = access.depth(); i > 0; --i) {
context = context->previous();
}
- Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
+ const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
node->set_op(op);
Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
return Reducer::Changed(node);
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 68cc1cea90..300604e198 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-inl.h"
@@ -15,157 +16,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-
-// TODO(mstarzinger): This is a temporary workaround for non-hydrogen stubs for
-// which we don't have an interface descriptor yet. Use ReplaceWithICStubCall
-// once these stub have been made into a HydrogenCodeStub.
-template <typename T>
-static CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate,
- T* stub) {
- CodeStub::Major key = static_cast<CodeStub*>(stub)->MajorKey();
- CodeStubInterfaceDescriptor* d = isolate->code_stub_interface_descriptor(key);
- stub->InitializeInterfaceDescriptor(d);
- return d;
-}
-
-
-// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
-// which doesn't have an interface descriptor yet. It mimics a hydrogen code
-// stub for the underlying IC stub code.
-class LoadICStubShim : public HydrogenCodeStub {
- public:
- LoadICStubShim(Isolate* isolate, ContextualMode contextual_mode)
- : HydrogenCodeStub(isolate), contextual_mode_(contextual_mode) {
- i::compiler::GetInterfaceDescriptor(isolate, this);
- }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE {
- ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode_);
- return LoadIC::initialize_stub(isolate(), extra_state);
- }
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- LoadIC::ReceiverRegister(),
- LoadIC::NameRegister() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
- }
-
- private:
- virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
- virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
-
- ContextualMode contextual_mode_;
-};
-
-
-// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
-// which doesn't have an interface descriptor yet. It mimics a hydrogen code
-// stub for the underlying IC stub code.
-class KeyedLoadICStubShim : public HydrogenCodeStub {
- public:
- explicit KeyedLoadICStubShim(Isolate* isolate) : HydrogenCodeStub(isolate) {
- i::compiler::GetInterfaceDescriptor(isolate, this);
- }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE {
- return isolate()->builtins()->KeyedLoadIC_Initialize();
- }
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- KeyedLoadIC::ReceiverRegister(),
- KeyedLoadIC::NameRegister() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
- }
-
- private:
- virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
- virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
-};
-
-
-// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
-// which doesn't have an interface descriptor yet. It mimics a hydrogen code
-// stub for the underlying IC stub code.
-class StoreICStubShim : public HydrogenCodeStub {
- public:
- StoreICStubShim(Isolate* isolate, StrictMode strict_mode)
- : HydrogenCodeStub(isolate), strict_mode_(strict_mode) {
- i::compiler::GetInterfaceDescriptor(isolate, this);
- }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE {
- return StoreIC::initialize_stub(isolate(), strict_mode_);
- }
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- StoreIC::ReceiverRegister(),
- StoreIC::NameRegister(),
- StoreIC::ValueRegister() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
- }
-
- private:
- virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
- virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
-
- StrictMode strict_mode_;
-};
-
-
-// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
-// which doesn't have an interface descriptor yet. It mimics a hydrogen code
-// stub for the underlying IC stub code.
-class KeyedStoreICStubShim : public HydrogenCodeStub {
- public:
- KeyedStoreICStubShim(Isolate* isolate, StrictMode strict_mode)
- : HydrogenCodeStub(isolate), strict_mode_(strict_mode) {
- i::compiler::GetInterfaceDescriptor(isolate, this);
- }
-
- virtual Handle<Code> GenerateCode() V8_OVERRIDE {
- return strict_mode_ == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- }
-
- virtual void InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
- Register registers[] = { InterfaceDescriptor::ContextRegister(),
- KeyedStoreIC::ReceiverRegister(),
- KeyedStoreIC::NameRegister(),
- KeyedStoreIC::ValueRegister() };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
- }
-
- private:
- virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
- virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
- virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
-
- StrictMode strict_mode_;
-};
-
-
-JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph,
- MachineOperatorBuilder* machine,
- SourcePositionTable* source_positions)
- : LoweringBuilder(jsgraph->graph(), source_positions),
- info_(info),
+JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
+ : info_(info),
jsgraph_(jsgraph),
- linkage_(new (jsgraph->zone()) Linkage(info)),
- machine_(machine) {}
+ linkage_(new (jsgraph->zone()) Linkage(info)) {}
-void JSGenericLowering::PatchOperator(Node* node, Operator* op) {
+void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
node->set_op(op);
}
@@ -200,58 +57,45 @@ Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
}
-void JSGenericLowering::Lower(Node* node) {
- Node* replacement = NULL;
- // Dispatch according to the opcode.
+Reduction JSGenericLowering::Reduce(Node* node) {
switch (node->opcode()) {
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- replacement = Lower##x(node); \
+#define DECLARE_CASE(x) \
+ case IrOpcode::k##x: \
+ Lower##x(node); \
break;
DECLARE_CASE(Branch)
JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
default:
// Nothing to see.
- return;
+ return NoChange();
}
-
- // Nothing to do if lowering was done by patching the existing node.
- if (replacement == node) return;
-
- // Iterate through uses of the original node and replace uses accordingly.
- UNIMPLEMENTED();
+ return Changed(node);
}
-#define REPLACE_IC_STUB_CALL(op, StubDeclaration) \
- Node* JSGenericLowering::Lower##op(Node* node) { \
- StubDeclaration; \
- ReplaceWithICStubCall(node, &stub); \
- return node; \
+#define REPLACE_BINARY_OP_IC_CALL(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
+ CallDescriptor::kPatchableCallSiteWithNop); \
}
-REPLACE_IC_STUB_CALL(JSBitwiseOr, BinaryOpICStub stub(isolate(), Token::BIT_OR))
-REPLACE_IC_STUB_CALL(JSBitwiseXor,
- BinaryOpICStub stub(isolate(), Token::BIT_XOR))
-REPLACE_IC_STUB_CALL(JSBitwiseAnd,
- BinaryOpICStub stub(isolate(), Token::BIT_AND))
-REPLACE_IC_STUB_CALL(JSShiftLeft, BinaryOpICStub stub(isolate(), Token::SHL))
-REPLACE_IC_STUB_CALL(JSShiftRight, BinaryOpICStub stub(isolate(), Token::SAR))
-REPLACE_IC_STUB_CALL(JSShiftRightLogical,
- BinaryOpICStub stub(isolate(), Token::SHR))
-REPLACE_IC_STUB_CALL(JSAdd, BinaryOpICStub stub(isolate(), Token::ADD))
-REPLACE_IC_STUB_CALL(JSSubtract, BinaryOpICStub stub(isolate(), Token::SUB))
-REPLACE_IC_STUB_CALL(JSMultiply, BinaryOpICStub stub(isolate(), Token::MUL))
-REPLACE_IC_STUB_CALL(JSDivide, BinaryOpICStub stub(isolate(), Token::DIV))
-REPLACE_IC_STUB_CALL(JSModulus, BinaryOpICStub stub(isolate(), Token::MOD))
-REPLACE_IC_STUB_CALL(JSToNumber, ToNumberStub stub(isolate()))
-#undef REPLACE_IC_STUB_CALL
-
-
-#define REPLACE_COMPARE_IC_CALL(op, token, pure) \
- Node* JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithCompareIC(node, token, pure); \
- return node; \
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseAnd, Token::BIT_AND)
+REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
+REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
+REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
+REPLACE_BINARY_OP_IC_CALL(JSAdd, Token::ADD)
+REPLACE_BINARY_OP_IC_CALL(JSSubtract, Token::SUB)
+REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
+REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
+REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
+#undef REPLACE_BINARY_OP_IC_CALL
+
+
+#define REPLACE_COMPARE_IC_CALL(op, token, pure) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token, pure); \
}
REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
@@ -264,10 +108,9 @@ REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
#undef REPLACE_COMPARE_IC_CALL
-#define REPLACE_RUNTIME_CALL(op, fun) \
- Node* JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithRuntimeCall(node, fun); \
- return node; \
+#define REPLACE_RUNTIME_CALL(op, fun) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithRuntimeCall(node, fun); \
}
REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
@@ -280,61 +123,71 @@ REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
#undef REPLACE_RUNTIME
-#define REPLACE_UNIMPLEMENTED(op) \
- Node* JSGenericLowering::Lower##op(Node* node) { \
- UNIMPLEMENTED(); \
- return node; \
- }
-REPLACE_UNIMPLEMENTED(JSToString)
+#define REPLACE_UNIMPLEMENTED(op) \
+ void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
REPLACE_UNIMPLEMENTED(JSToName)
REPLACE_UNIMPLEMENTED(JSYield)
REPLACE_UNIMPLEMENTED(JSDebugger)
#undef REPLACE_UNIMPLEMENTED
-static CallDescriptor::DeoptimizationSupport DeoptimizationSupportForNode(
- Node* node) {
- return OperatorProperties::CanLazilyDeoptimize(node->op())
- ? CallDescriptor::kCanDeoptimize
- : CallDescriptor::kCannotDeoptimize;
+static CallDescriptor::Flags FlagsForNode(Node* node) {
+ CallDescriptor::Flags result = CallDescriptor::kNoFlags;
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ result |= CallDescriptor::kNeedsFrameState;
+ }
+ return result;
}
void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
bool pure) {
- BinaryOpICStub stub(isolate(), Token::ADD); // TODO(mstarzinger): Hack.
- CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
- CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(d);
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), token);
- Node* compare;
+ Callable callable = CodeFactory::CompareIC(isolate(), token);
+ bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
+ CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
+ callable.descriptor(), 0,
+ CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
+ NodeVector inputs(zone());
+ inputs.reserve(node->InputCount() + 1);
+ inputs.push_back(CodeConstant(callable.code()));
+ inputs.push_back(NodeProperties::GetValueInput(node, 0));
+ inputs.push_back(NodeProperties::GetValueInput(node, 1));
+ inputs.push_back(NodeProperties::GetContextInput(node));
if (pure) {
- // A pure (strict) comparison doesn't have an effect or control.
- // But for the graph, we need to add these inputs.
- compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
- NodeProperties::GetValueInput(node, 0),
- NodeProperties::GetValueInput(node, 1),
- NodeProperties::GetContextInput(node),
- graph()->start(), graph()->start());
+ // A pure (strict) comparison doesn't have an effect, control or frame
+ // state. But for the graph, we need to add control and effect inputs.
+ DCHECK(!has_frame_state);
+ inputs.push_back(graph()->start());
+ inputs.push_back(graph()->start());
} else {
- compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
- NodeProperties::GetValueInput(node, 0),
- NodeProperties::GetValueInput(node, 1),
- NodeProperties::GetContextInput(node),
- NodeProperties::GetEffectInput(node),
- NodeProperties::GetControlInput(node));
+ DCHECK(has_frame_state == FLAG_turbo_deoptimization);
+ if (FLAG_turbo_deoptimization) {
+ inputs.push_back(NodeProperties::GetFrameStateInput(node));
+ }
+ inputs.push_back(NodeProperties::GetEffectInput(node));
+ inputs.push_back(NodeProperties::GetControlInput(node));
}
+ Node* compare =
+ graph()->NewNode(common()->Call(desc_compare),
+ static_cast<int>(inputs.size()), &inputs.front());
+
node->ReplaceInput(0, compare);
node->ReplaceInput(1, SmiConstant(token));
+
+ if (has_frame_state) {
+ // Remove the frame state from inputs.
+ node->RemoveInput(NodeProperties::FirstFrameStateIndex(node));
+ }
+
ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
}
-void JSGenericLowering::ReplaceWithICStubCall(Node* node,
- HydrogenCodeStub* stub) {
- CodeStubInterfaceDescriptor* d = stub->GetInterfaceDescriptor();
+void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+ CallDescriptor::Flags flags) {
CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- d, 0, DeoptimizationSupportForNode(node));
- Node* stub_code = CodeConstant(stub->GetCode());
+ callable.descriptor(), 0, flags | FlagsForNode(node));
+ Node* stub_code = CodeConstant(callable.code());
PatchInsertInput(node, 0, stub_code);
PatchOperator(node, common()->Call(desc));
}
@@ -343,14 +196,15 @@ void JSGenericLowering::ReplaceWithICStubCall(Node* node,
void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
Builtins::JavaScript id,
int nargs) {
- CallFunctionStub stub(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
- CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, nargs);
+ Callable callable =
+ CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
+ CallDescriptor* desc =
+ linkage()->GetStubCallDescriptor(callable.descriptor(), nargs);
// TODO(mstarzinger): Accessing the builtins object this way prevents sharing
// of code across native contexts. Fix this by loading from given context.
Handle<JSFunction> function(
JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
- Node* stub_code = CodeConstant(stub.GetCode());
+ Node* stub_code = CodeConstant(callable.code());
Node* function_node = FunctionConstant(function);
PatchInsertInput(node, 0, stub_code);
PatchInsertInput(node, 1, function_node);
@@ -361,11 +215,11 @@ void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Runtime::FunctionId f,
int nargs_override) {
- Operator::Property props = node->op()->properties();
+ Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
- CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
- f, nargs, props, DeoptimizationSupportForNode(node));
+ CallDescriptor* desc =
+ linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
Node* ref = ExternalConstant(ExternalReference(f, isolate()));
Node* arity = Int32Constant(nargs);
if (!centrystub_constant_.is_set()) {
@@ -378,143 +232,144 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
}
-Node* JSGenericLowering::LowerBranch(Node* node) {
- Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
- jsgraph()->TrueConstant());
- node->ReplaceInput(0, test);
- return node;
+void JSGenericLowering::LowerBranch(Node* node) {
+ if (!info()->is_typing_enabled()) {
+ // TODO(mstarzinger): If typing is enabled then simplified lowering will
+ // have inserted the correct ChangeBoolToBit, otherwise we need to perform
+ // poor-man's representation inference here and insert manual change.
+ Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+ jsgraph()->TrueConstant());
+ node->ReplaceInput(0, test);
+ }
+}
+
+
+void JSGenericLowering::LowerJSUnaryNot(Node* node) {
+ Callable callable = CodeFactory::ToBoolean(
+ isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
+ ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSToBoolean(Node* node) {
+ Callable callable =
+ CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+ ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
-Node* JSGenericLowering::LowerJSUnaryNot(Node* node) {
- ToBooleanStub stub(isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
- ReplaceWithICStubCall(node, &stub);
- return node;
+void JSGenericLowering::LowerJSToNumber(Node* node) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags);
}
-Node* JSGenericLowering::LowerJSToBoolean(Node* node) {
- ToBooleanStub stub(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
- ReplaceWithICStubCall(node, &stub);
- return node;
+void JSGenericLowering::LowerJSToString(Node* node) {
+ ReplaceWithBuiltinCall(node, Builtins::TO_STRING, 1);
}
-Node* JSGenericLowering::LowerJSToObject(Node* node) {
+void JSGenericLowering::LowerJSToObject(Node* node) {
ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
- return node;
}
-Node* JSGenericLowering::LowerJSLoadProperty(Node* node) {
- KeyedLoadICStubShim stub(isolate());
- ReplaceWithICStubCall(node, &stub);
- return node;
+void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+ Callable callable = CodeFactory::KeyedLoadIC(isolate());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
-Node* JSGenericLowering::LowerJSLoadNamed(Node* node) {
+void JSGenericLowering::LowerJSLoadNamed(Node* node) {
LoadNamedParameters p = OpParameter<LoadNamedParameters>(node);
- LoadICStubShim stub(isolate(), p.contextual_mode);
+ Callable callable = CodeFactory::LoadIC(isolate(), p.contextual_mode);
PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name));
- ReplaceWithICStubCall(node, &stub);
- return node;
+ ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
-Node* JSGenericLowering::LowerJSStoreProperty(Node* node) {
- // TODO(mstarzinger): The strict_mode needs to be carried along in the
- // operator so that graphs are fully compositional for inlining.
- StrictMode strict_mode = info()->strict_mode();
- KeyedStoreICStubShim stub(isolate(), strict_mode);
- ReplaceWithICStubCall(node, &stub);
- return node;
+void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+ StrictMode strict_mode = OpParameter<StrictMode>(node);
+ Callable callable = CodeFactory::KeyedStoreIC(isolate(), strict_mode);
+ ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
-Node* JSGenericLowering::LowerJSStoreNamed(Node* node) {
- PrintableUnique<Name> key = OpParameter<PrintableUnique<Name> >(node);
- // TODO(mstarzinger): The strict_mode needs to be carried along in the
- // operator so that graphs are fully compositional for inlining.
- StrictMode strict_mode = info()->strict_mode();
- StoreICStubShim stub(isolate(), strict_mode);
- PatchInsertInput(node, 1, jsgraph()->HeapConstant(key));
- ReplaceWithICStubCall(node, &stub);
- return node;
+void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+ StoreNamedParameters params = OpParameter<StoreNamedParameters>(node);
+ Callable callable = CodeFactory::StoreIC(isolate(), params.strict_mode);
+ PatchInsertInput(node, 1, jsgraph()->HeapConstant(params.name));
+ ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
-Node* JSGenericLowering::LowerJSDeleteProperty(Node* node) {
+void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
StrictMode strict_mode = OpParameter<StrictMode>(node);
PatchInsertInput(node, 2, SmiConstant(strict_mode));
ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
- return node;
}
-Node* JSGenericLowering::LowerJSHasProperty(Node* node) {
+void JSGenericLowering::LowerJSHasProperty(Node* node) {
ReplaceWithBuiltinCall(node, Builtins::IN, 2);
- return node;
}
-Node* JSGenericLowering::LowerJSInstanceOf(Node* node) {
+void JSGenericLowering::LowerJSInstanceOf(Node* node) {
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kReturnTrueFalseObject |
InstanceofStub::kArgsInRegisters);
InstanceofStub stub(isolate(), flags);
- CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+ CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
Node* stub_code = CodeConstant(stub.GetCode());
PatchInsertInput(node, 0, stub_code);
PatchOperator(node, common()->Call(desc));
- return node;
}
-Node* JSGenericLowering::LowerJSLoadContext(Node* node) {
+void JSGenericLowering::LowerJSLoadContext(Node* node) {
ContextAccess access = OpParameter<ContextAccess>(node);
// TODO(mstarzinger): Use simplified operators instead of machine operators
// here so that load/store optimization can be applied afterwards.
for (int i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
0, graph()->NewNode(
- machine()->Load(kMachineTagged),
+ machine()->Load(kMachAnyTagged),
NodeProperties::GetValueInput(node, 0),
Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
NodeProperties::GetEffectInput(node)));
}
node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
- PatchOperator(node, machine()->Load(kMachineTagged));
- return node;
+ PatchOperator(node, machine()->Load(kMachAnyTagged));
}
-Node* JSGenericLowering::LowerJSStoreContext(Node* node) {
+void JSGenericLowering::LowerJSStoreContext(Node* node) {
ContextAccess access = OpParameter<ContextAccess>(node);
// TODO(mstarzinger): Use simplified operators instead of machine operators
// here so that load/store optimization can be applied afterwards.
for (int i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
0, graph()->NewNode(
- machine()->Load(kMachineTagged),
+ machine()->Load(kMachAnyTagged),
NodeProperties::GetValueInput(node, 0),
Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
NodeProperties::GetEffectInput(node)));
}
node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
- PatchOperator(node, machine()->Store(kMachineTagged, kFullWriteBarrier));
- return node;
+ PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
+ kFullWriteBarrier)));
}
-Node* JSGenericLowering::LowerJSCallConstruct(Node* node) {
+void JSGenericLowering::LowerJSCallConstruct(Node* node) {
int arity = OpParameter<int>(node);
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- d, arity, DeoptimizationSupportForNode(node));
+ CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallDescriptor* desc =
+ linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
Node* stub_code = CodeConstant(stub.GetCode());
Node* construct = NodeProperties::GetValueInput(node, 0);
PatchInsertInput(node, 0, stub_code);
@@ -522,29 +377,27 @@ Node* JSGenericLowering::LowerJSCallConstruct(Node* node) {
PatchInsertInput(node, 2, construct);
PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
PatchOperator(node, common()->Call(desc));
- return node;
}
-Node* JSGenericLowering::LowerJSCallFunction(Node* node) {
+void JSGenericLowering::LowerJSCallFunction(Node* node) {
CallParameters p = OpParameter<CallParameters>(node);
CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
- CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- d, p.arity - 1, DeoptimizationSupportForNode(node));
+ CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallDescriptor* desc =
+ linkage()->GetStubCallDescriptor(d, p.arity - 1, FlagsForNode(node));
Node* stub_code = CodeConstant(stub.GetCode());
PatchInsertInput(node, 0, stub_code);
PatchOperator(node, common()->Call(desc));
- return node;
}
-Node* JSGenericLowering::LowerJSCallRuntime(Node* node) {
+void JSGenericLowering::LowerJSCallRuntime(Node* node) {
Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
int arity = OperatorProperties::GetValueInputCount(node->op());
ReplaceWithRuntimeCall(node, function, arity);
- return node;
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index e3113e541d..400f8062a5 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -8,18 +8,14 @@
#include "src/v8.h"
#include "src/allocation.h"
+#include "src/code-factory.h"
#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/lowering-builder.h"
#include "src/compiler/opcodes.h"
-#include "src/unique.h"
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class HydrogenCodeStub;
-
namespace compiler {
// Forward declarations.
@@ -28,18 +24,16 @@ class MachineOperatorBuilder;
class Linkage;
// Lowers JS-level operators to runtime and IC calls in the "generic" case.
-class JSGenericLowering : public LoweringBuilder {
+class JSGenericLowering : public Reducer {
public:
- JSGenericLowering(CompilationInfo* info, JSGraph* graph,
- MachineOperatorBuilder* machine,
- SourcePositionTable* source_positions);
+ JSGenericLowering(CompilationInfo* info, JSGraph* graph);
virtual ~JSGenericLowering() {}
- virtual void Lower(Node* node);
+ virtual Reduction Reduce(Node* node);
protected:
-// Dispatched depending on opcode.
-#define DECLARE_LOWER(x) Node* Lower##x(Node* node);
+#define DECLARE_LOWER(x) void Lower##x(Node* node);
+ // Dispatched depending on opcode.
ALL_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER
@@ -51,12 +45,12 @@ class JSGenericLowering : public LoweringBuilder {
Node* ExternalConstant(ExternalReference ref);
// Helpers to patch existing nodes in the graph.
- void PatchOperator(Node* node, Operator* new_op);
+ void PatchOperator(Node* node, const Operator* new_op);
void PatchInsertInput(Node* node, int index, Node* input);
// Helpers to replace existing nodes with a generic call.
void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
- void ReplaceWithICStubCall(Node* node, HydrogenCodeStub* stub);
+ void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
@@ -67,17 +61,17 @@ class JSGenericLowering : public LoweringBuilder {
Linkage* linkage() const { return linkage_; }
CompilationInfo* info() const { return info_; }
CommonOperatorBuilder* common() const { return jsgraph()->common(); }
- MachineOperatorBuilder* machine() const { return machine_; }
+ MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
private:
CompilationInfo* info_;
JSGraph* jsgraph_;
Linkage* linkage_;
- MachineOperatorBuilder* machine_;
SetOncePointer<Node> centrystub_constant_;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_JS_GENERIC_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 2cebbc784e..c403721cf0 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -11,19 +11,27 @@ namespace internal {
namespace compiler {
Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
- PrintableUnique<Object> unique =
- PrintableUnique<Object>::CreateImmovable(zone(), object);
+ Unique<Object> unique = Unique<Object>::CreateImmovable(object);
return NewNode(common()->HeapConstant(unique));
}
-Node* JSGraph::NewNode(Operator* op) {
+Node* JSGraph::NewNode(const Operator* op) {
Node* node = graph()->NewNode(op);
typer_->Init(node);
return node;
}
+Node* JSGraph::CEntryStubConstant() {
+ if (!c_entry_stub_constant_.is_set()) {
+ c_entry_stub_constant_.set(
+ ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+ }
+ return c_entry_stub_constant_.get();
+}
+
+
Node* JSGraph::UndefinedConstant() {
if (!undefined_constant_.is_set()) {
undefined_constant_.set(
@@ -85,7 +93,7 @@ Node* JSGraph::NaNConstant() {
}
-Node* JSGraph::HeapConstant(PrintableUnique<Object> value) {
+Node* JSGraph::HeapConstant(Unique<Object> value) {
// TODO(turbofan): canonicalize heap constants using Unique<T>
return NewNode(common()->HeapConstant(value));
}
@@ -95,8 +103,12 @@ Node* JSGraph::HeapConstant(Handle<Object> value) {
// TODO(titzer): We could also match against the addresses of immortable
// immovables here, even without access to the heap, thus always
// canonicalizing references to them.
- return HeapConstant(
- PrintableUnique<Object>::CreateUninitialized(zone(), value));
+ // return HeapConstant(Unique<Object>::CreateUninitialized(value));
+ // TODO(turbofan): This is a work-around to make Unique::HashCode() work for
+ // value numbering. We need some sane way to compute a unique hash code for
+ // arbitrary handles here.
+ Unique<Object> unique(reinterpret_cast<Address>(*value.location()), value);
+ return HeapConstant(unique);
}
@@ -122,8 +134,8 @@ Node* JSGraph::Constant(Handle<Object> value) {
Node* JSGraph::Constant(double value) {
- if (BitCast<int64_t>(value) == BitCast<int64_t>(0.0)) return ZeroConstant();
- if (BitCast<int64_t>(value) == BitCast<int64_t>(1.0)) return OneConstant();
+ if (bit_cast<int64_t>(value) == bit_cast<int64_t>(0.0)) return ZeroConstant();
+ if (bit_cast<int64_t>(value) == bit_cast<int64_t>(1.0)) return OneConstant();
return NumberConstant(value);
}
@@ -153,6 +165,12 @@ Node* JSGraph::NumberConstant(double value) {
}
+Node* JSGraph::Float32Constant(float value) {
+ // TODO(turbofan): cache float32 constants.
+ return NewNode(common()->Float32Constant(value));
+}
+
+
Node* JSGraph::Float64Constant(double value) {
Node** loc = cache_.FindFloat64Constant(value);
if (*loc == NULL) {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 59a6b845ed..fd3de124f0 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -9,6 +9,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
namespace v8 {
@@ -22,14 +23,18 @@ class Typer;
// constants, and various helper methods.
class JSGraph : public ZoneObject {
public:
- JSGraph(Graph* graph, CommonOperatorBuilder* common, Typer* typer)
+ JSGraph(Graph* graph, CommonOperatorBuilder* common,
+ JSOperatorBuilder* javascript, Typer* typer,
+ MachineOperatorBuilder* machine)
: graph_(graph),
common_(common),
- javascript_(zone()),
+ javascript_(javascript),
typer_(typer),
+ machine_(machine),
cache_(zone()) {}
// Canonicalized global constants.
+ Node* CEntryStubConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -41,7 +46,7 @@ class JSGraph : public ZoneObject {
// Creates a HeapConstant node, possibly canonicalized, without inspecting the
// object.
- Node* HeapConstant(PrintableUnique<Object> value);
+ Node* HeapConstant(Unique<Object> value);
// Creates a HeapConstant node, possibly canonicalized, and may access the
// heap to inspect the object.
@@ -60,6 +65,12 @@ class JSGraph : public ZoneObject {
// Creates a Int32Constant node, usually canonicalized.
Node* Int32Constant(int32_t value);
+ Node* Uint32Constant(uint32_t value) {
+ return Int32Constant(bit_cast<int32_t>(value));
+ }
+
+ // Creates a Float32Constant node, usually canonicalized.
+ Node* Float32Constant(float value);
// Creates a Float64Constant node, usually canonicalized.
Node* Float64Constant(double value);
@@ -72,17 +83,21 @@ class JSGraph : public ZoneObject {
return Constant(immediate);
}
- JSOperatorBuilder* javascript() { return &javascript_; }
+ JSOperatorBuilder* javascript() { return javascript_; }
CommonOperatorBuilder* common() { return common_; }
+ MachineOperatorBuilder* machine() { return machine_; }
Graph* graph() { return graph_; }
Zone* zone() { return graph()->zone(); }
+ Isolate* isolate() { return zone()->isolate(); }
private:
Graph* graph_;
CommonOperatorBuilder* common_;
- JSOperatorBuilder javascript_;
+ JSOperatorBuilder* javascript_;
Typer* typer_;
+ MachineOperatorBuilder* machine_;
+ SetOncePointer<Node> c_entry_stub_constant_;
SetOncePointer<Node> undefined_constant_;
SetOncePointer<Node> the_hole_constant_;
SetOncePointer<Node> true_constant_;
@@ -96,10 +111,11 @@ class JSGraph : public ZoneObject {
Node* ImmovableHeapConstant(Handle<Object> value);
Node* NumberConstant(double value);
- Node* NewNode(Operator* op);
+ Node* NewNode(const Operator* op);
- Factory* factory() { return zone()->isolate()->factory(); }
+ Factory* factory() { return isolate()->factory(); }
};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
new file mode 100644
index 0000000000..af021459a2
--- /dev/null
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -0,0 +1,446 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InlinerVisitor : public NullNodeVisitor {
+ public:
+ explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}
+
+ GenericGraphVisit::Control Post(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallFunction:
+ inliner_->TryInlineCall(node);
+ break;
+ default:
+ break;
+ }
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ private:
+ JSInliner* inliner_;
+};
+
+
+void JSInliner::Inline() {
+ InlinerVisitor visitor(this);
+ jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(sigurds) Find a home for this function and reuse it everywhere (esp. in
+// test cases, where similar code is currently duplicated).
+static void Parse(Handle<JSFunction> function, CompilationInfoWithZone* info) {
+ CHECK(Parser::Parse(info));
+ CHECK(Rewriter::Rewrite(info));
+ CHECK(Scope::Analyze(info));
+ CHECK(Compiler::EnsureDeoptimizationSupport(info));
+}
+
+
+// A facade on a JSFunction's graph to facilitate inlining. It assumes
+// that the function graph has only one return statement, and provides
+// {UnifyReturn} to convert a function graph to that end.
+class Inlinee {
+ public:
+ Inlinee(Node* start, Node* end) : start_(start), end_(end) {}
+
+ // Returns the last regular control node, that is
+ // the last control node before the end node.
+ Node* end_block() { return NodeProperties::GetControlInput(unique_return()); }
+
+ // Return the effect output of the graph,
+ // that is the effect input of the return statement of the inlinee.
+ Node* effect_output() {
+ return NodeProperties::GetEffectInput(unique_return());
+ }
+ // Return the value output of the graph,
+ // that is the value input of the return statement of the inlinee.
+ Node* value_output() {
+ return NodeProperties::GetValueInput(unique_return(), 0);
+ }
+ // Return the unique return statement of the graph.
+ Node* unique_return() {
+ Node* unique_return = NodeProperties::GetControlInput(end_);
+ DCHECK_EQ(IrOpcode::kReturn, unique_return->opcode());
+ return unique_return;
+ }
+
+ // Counts JSFunction, Receiver, arguments, context but not effect, control.
+ size_t total_parameters() { return start_->op()->OutputCount(); }
+
+ // Counts only formal parameters.
+ size_t formal_parameters() {
+ DCHECK_GE(total_parameters(), 3);
+ return total_parameters() - 3;
+ }
+
+ // Inline this graph at {call}, use {jsgraph} and its zone to create
+ // any new nodes.
+ void InlineAtCall(JSGraph* jsgraph, Node* call);
+
+ // Ensure that only a single return reaches the end node.
+ static void UnifyReturn(JSGraph* jsgraph);
+
+ private:
+ Node* start_;
+ Node* end_;
+};
+
+
+void Inlinee::UnifyReturn(JSGraph* jsgraph) {
+ Graph* graph = jsgraph->graph();
+
+ Node* final_merge = NodeProperties::GetControlInput(graph->end(), 0);
+ if (final_merge->opcode() == IrOpcode::kReturn) {
+ // nothing to do
+ return;
+ }
+ DCHECK_EQ(IrOpcode::kMerge, final_merge->opcode());
+
+ int predecessors =
+ OperatorProperties::GetControlInputCount(final_merge->op());
+
+ const Operator* op_phi = jsgraph->common()->Phi(kMachAnyTagged, predecessors);
+ const Operator* op_ephi = jsgraph->common()->EffectPhi(predecessors);
+
+ NodeVector values(jsgraph->zone());
+ NodeVector effects(jsgraph->zone());
+ // Iterate over all control flow predecessors,
+ // which must be return statements.
+ InputIter iter = final_merge->inputs().begin();
+ while (iter != final_merge->inputs().end()) {
+ Node* input = *iter;
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ values.push_back(NodeProperties::GetValueInput(input, 0));
+ effects.push_back(NodeProperties::GetEffectInput(input));
+ iter.UpdateToAndIncrement(NodeProperties::GetControlInput(input));
+ input->RemoveAllInputs();
+ break;
+ default:
+ UNREACHABLE();
+ ++iter;
+ break;
+ }
+ }
+ values.push_back(final_merge);
+ effects.push_back(final_merge);
+ Node* phi =
+ graph->NewNode(op_phi, static_cast<int>(values.size()), &values.front());
+ Node* ephi = graph->NewNode(op_ephi, static_cast<int>(effects.size()),
+ &effects.front());
+ Node* new_return =
+ graph->NewNode(jsgraph->common()->Return(), phi, ephi, final_merge);
+ graph->end()->ReplaceInput(0, new_return);
+}
+
+
+class CopyVisitor : public NullNodeVisitor {
+ public:
+ CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
+ : copies_(source_graph->NodeCount(), NULL, temp_zone),
+ sentinels_(source_graph->NodeCount(), NULL, temp_zone),
+ source_graph_(source_graph),
+ target_graph_(target_graph),
+ temp_zone_(temp_zone),
+ sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, 0, 0,
+ "sentinel") {}
+
+ GenericGraphVisit::Control Post(Node* original) {
+ NodeVector inputs(temp_zone_);
+ for (InputIter it = original->inputs().begin();
+ it != original->inputs().end(); ++it) {
+ inputs.push_back(GetCopy(*it));
+ }
+
+ // Reuse the operator in the copy. This assumes that op lives in a zone
+ // that lives longer than graph()'s zone.
+ Node* copy =
+ target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
+ (inputs.empty() ? NULL : &inputs.front()));
+ copies_[original->id()] = copy;
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ Node* GetCopy(Node* original) {
+ Node* copy = copies_[original->id()];
+ if (copy == NULL) {
+ copy = GetSentinel(original);
+ }
+ DCHECK_NE(NULL, copy);
+ return copy;
+ }
+
+ void CopyGraph() {
+ source_graph_->VisitNodeInputsFromEnd(this);
+ ReplaceSentinels();
+ }
+
+ const NodeVector& copies() { return copies_; }
+
+ private:
+ void ReplaceSentinels() {
+ for (NodeId id = 0; id < source_graph_->NodeCount(); ++id) {
+ Node* sentinel = sentinels_[id];
+ if (sentinel == NULL) continue;
+ Node* copy = copies_[id];
+ DCHECK_NE(NULL, copy);
+ sentinel->ReplaceUses(copy);
+ }
+ }
+
+ Node* GetSentinel(Node* original) {
+ Node* sentinel = sentinels_[original->id()];
+ if (sentinel == NULL) {
+ sentinel = target_graph_->NewNode(&sentinel_op_);
+ }
+ return sentinel;
+ }
+
+ NodeVector copies_;
+ NodeVector sentinels_;
+ Graph* source_graph_;
+ Graph* target_graph_;
+ Zone* temp_zone_;
+ SimpleOperator sentinel_op_;
+};
+
+
+void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
+ // The scheduler is smart enough to place our code; we just ensure {control}
+ // becomes the control input of the start of the inlinee.
+ Node* control = NodeProperties::GetControlInput(call);
+
+ // The inlinee uses the context from the JSFunction object. This will
+ // also be the effect dependency for the inlinee as it produces an effect.
+ SimplifiedOperatorBuilder simplified(jsgraph->zone());
+ Node* context = jsgraph->graph()->NewNode(
+ simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
+ NodeProperties::GetValueInput(call, 0),
+ NodeProperties::GetEffectInput(call));
+
+ // Context is last argument.
+ int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
+ // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
+ // context, effect, control.
+ int inliner_inputs = OperatorProperties::GetValueInputCount(call->op());
+ // Iterate over all uses of the start node.
+ UseIter iter = start_->uses().begin();
+ while (iter != start_->uses().end()) {
+ Node* use = *iter;
+ switch (use->opcode()) {
+ case IrOpcode::kParameter: {
+ int index = 1 + OpParameter<int>(use->op());
+ if (index < inliner_inputs && index < inlinee_context_index) {
+ // There is an input from the call, and the index is a value
+ // projection but not the context, so rewire the input.
+ NodeProperties::ReplaceWithValue(*iter, call->InputAt(index));
+ } else if (index == inlinee_context_index) {
+ // This is the context projection, rewire it to the context from the
+ // JSFunction object.
+ NodeProperties::ReplaceWithValue(*iter, context);
+ } else if (index < inlinee_context_index) {
+ // Call has fewer arguments than required, fill with undefined.
+ NodeProperties::ReplaceWithValue(*iter, jsgraph->UndefinedConstant());
+ } else {
+ // We got too many arguments, discard for now.
+ // TODO(sigurds): Fix to treat arguments array correctly.
+ }
+ ++iter;
+ break;
+ }
+ default:
+ if (NodeProperties::IsEffectEdge(iter.edge())) {
+ iter.UpdateToAndIncrement(context);
+ } else if (NodeProperties::IsControlEdge(iter.edge())) {
+ iter.UpdateToAndIncrement(control);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ }
+
+ // Iterate over all uses of the call node.
+ iter = call->uses().begin();
+ while (iter != call->uses().end()) {
+ if (NodeProperties::IsEffectEdge(iter.edge())) {
+ iter.UpdateToAndIncrement(effect_output());
+ } else if (NodeProperties::IsControlEdge(iter.edge())) {
+ UNREACHABLE();
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(iter.edge()));
+ iter.UpdateToAndIncrement(value_output());
+ }
+ }
+ call->RemoveAllInputs();
+ DCHECK_EQ(0, call->UseCount());
+ // TODO(sigurds) Remove this once we copy.
+ unique_return()->RemoveAllInputs();
+}
+
+
+// TODO(turbofan) Provide such accessors for every node, possibly even
+// generate them.
+class JSCallFunctionAccessor {
+ public:
+ explicit JSCallFunctionAccessor(Node* call) : call_(call) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
+
+ Node* jsfunction() { return call_->InputAt(0); }
+
+ Node* receiver() { return call_->InputAt(1); }
+
+ Node* formal_argument(size_t index) {
+ DCHECK(index < formal_arguments());
+ return call_->InputAt(static_cast<int>(2 + index));
+ }
+
+ size_t formal_arguments() {
+ // {value_inputs} includes jsfunction and receiver.
+ size_t value_inputs = OperatorProperties::GetValueInputCount(call_->op());
+ DCHECK_GE(call_->InputCount(), 2);
+ return value_inputs - 2;
+ }
+
+ Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
+
+ private:
+ Node* call_;
+};
+
+
+void JSInliner::AddClosureToFrameState(Node* frame_state,
+ Handle<JSFunction> jsfunction) {
+ FrameStateCallInfo call_info = OpParameter<FrameStateCallInfo>(frame_state);
+ const Operator* op = jsgraph_->common()->FrameState(
+ FrameStateType::JS_FRAME, call_info.bailout_id(),
+ call_info.state_combine(), jsfunction);
+ frame_state->set_op(op);
+}
+
+
+Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
+ Handle<JSFunction> jsfunction,
+ Zone* temp_zone) {
+ const Operator* op =
+ jsgraph_->common()->FrameState(FrameStateType::ARGUMENTS_ADAPTOR,
+ BailoutId(-1), kIgnoreOutput, jsfunction);
+ const Operator* op0 = jsgraph_->common()->StateValues(0);
+ Node* node0 = jsgraph_->graph()->NewNode(op0);
+ NodeVector params(temp_zone);
+ params.push_back(call->receiver());
+ for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
+ params.push_back(call->formal_argument(argument));
+ }
+ const Operator* op_param =
+ jsgraph_->common()->StateValues(static_cast<int>(params.size()));
+ Node* params_node = jsgraph_->graph()->NewNode(
+ op_param, static_cast<int>(params.size()), &params.front());
+ return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
+ jsgraph_->UndefinedConstant(),
+ call->frame_state());
+}
+
+
+void JSInliner::TryInlineCall(Node* call_node) {
+ JSCallFunctionAccessor call(call_node);
+
+ HeapObjectMatcher<JSFunction> match(call.jsfunction());
+ if (!match.HasValue()) {
+ return;
+ }
+
+ Handle<JSFunction> function = match.Value().handle();
+
+ if (function->shared()->native()) {
+ if (FLAG_trace_turbo_inlining) {
+ SmartArrayPointer<char> name =
+ function->shared()->DebugName()->ToCString();
+ PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ }
+ return;
+ }
+
+ CompilationInfoWithZone info(function);
+ Parse(function, &info);
+
+ if (info.scope()->arguments() != NULL) {
+ // For now do not inline functions that use their arguments array.
+ SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+ if (FLAG_trace_turbo_inlining) {
+ PrintF(
+ "Not Inlining %s into %s because inlinee uses arguments "
+ "array\n",
+ name.get(), info_->shared_info()->DebugName()->ToCString().get());
+ }
+ return;
+ }
+
+ if (FLAG_trace_turbo_inlining) {
+ SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+ PrintF("Inlining %s into %s\n", name.get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ }
+
+ Graph graph(info.zone());
+ Typer typer(info.zone());
+ JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(), &typer,
+ jsgraph_->machine());
+
+ AstGraphBuilder graph_builder(&info, &jsgraph);
+ graph_builder.CreateGraph();
+ Inlinee::UnifyReturn(&jsgraph);
+
+ CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
+ visitor.CopyGraph();
+
+ Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
+
+ Node* outer_frame_state = call.frame_state();
+ // Insert argument adaptor frame if required.
+ if (call.formal_arguments() != inlinee.formal_parameters()) {
+ outer_frame_state =
+ CreateArgumentsAdaptorFrameState(&call, function, info.zone());
+ }
+
+ for (NodeVectorConstIter it = visitor.copies().begin();
+ it != visitor.copies().end(); ++it) {
+ Node* node = *it;
+ if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
+ AddClosureToFrameState(node, function);
+ NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+ }
+ }
+
+ inlinee.InlineAtCall(jsgraph_, call_node);
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
new file mode 100644
index 0000000000..f13517007e
--- /dev/null
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INLINING_H_
+#define V8_COMPILER_JS_INLINING_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSCallFunctionAccessor;
+
+class JSInliner {
+ public:
+ JSInliner(CompilationInfo* info, JSGraph* jsgraph)
+ : info_(info), jsgraph_(jsgraph) {}
+
+ void Inline();
+ void TryInlineCall(Node* node);
+
+ private:
+ friend class InlinerVisitor;
+ CompilationInfo* info_;
+ JSGraph* jsgraph_;
+
+ Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
+ Handle<JSFunction> jsfunction,
+ Zone* temp_zone);
+ void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
+ static void UnifyReturn(Graph* graph);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_JS_INLINING_H_
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index fd9547d94a..b95467fc9b 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -40,7 +40,7 @@ class ContextAccess {
// Defines the property being loaded from an object by a named load. This is
// used as a parameter by JSLoadNamed operators.
struct LoadNamedParameters {
- PrintableUnique<Name> name;
+ Unique<Name> name;
ContextualMode contextual_mode;
};
@@ -51,6 +51,13 @@ struct CallParameters {
CallFunctionFlags flags;
};
+// Defines the property being stored to an object by a named store. This is
+// used as a parameter by JSStoreNamed operators.
+struct StoreNamedParameters {
+ StrictMode strict_mode;
+ Unique<Name> name;
+};
+
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
@@ -74,95 +81,106 @@ class JSOperatorBuilder {
#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
- Operator* Equal() { BINOP(JSEqual); }
- Operator* NotEqual() { BINOP(JSNotEqual); }
- Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
- Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
- Operator* LessThan() { BINOP(JSLessThan); }
- Operator* GreaterThan() { BINOP(JSGreaterThan); }
- Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
- Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
- Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
- Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
- Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
- Operator* ShiftLeft() { BINOP(JSShiftLeft); }
- Operator* ShiftRight() { BINOP(JSShiftRight); }
- Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
- Operator* Add() { BINOP(JSAdd); }
- Operator* Subtract() { BINOP(JSSubtract); }
- Operator* Multiply() { BINOP(JSMultiply); }
- Operator* Divide() { BINOP(JSDivide); }
- Operator* Modulus() { BINOP(JSModulus); }
-
- Operator* UnaryNot() { UNOP(JSUnaryNot); }
- Operator* ToBoolean() { UNOP(JSToBoolean); }
- Operator* ToNumber() { UNOP(JSToNumber); }
- Operator* ToString() { UNOP(JSToString); }
- Operator* ToName() { UNOP(JSToName); }
- Operator* ToObject() { UNOP(JSToObject); }
- Operator* Yield() { UNOP(JSYield); }
-
- Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
-
- Operator* Call(int arguments, CallFunctionFlags flags) {
+ const Operator* Equal() { BINOP(JSEqual); }
+ const Operator* NotEqual() { BINOP(JSNotEqual); }
+ const Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
+ const Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
+ const Operator* LessThan() { BINOP(JSLessThan); }
+ const Operator* GreaterThan() { BINOP(JSGreaterThan); }
+ const Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
+ const Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
+ const Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
+ const Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
+ const Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
+ const Operator* ShiftLeft() { BINOP(JSShiftLeft); }
+ const Operator* ShiftRight() { BINOP(JSShiftRight); }
+ const Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
+ const Operator* Add() { BINOP(JSAdd); }
+ const Operator* Subtract() { BINOP(JSSubtract); }
+ const Operator* Multiply() { BINOP(JSMultiply); }
+ const Operator* Divide() { BINOP(JSDivide); }
+ const Operator* Modulus() { BINOP(JSModulus); }
+
+ const Operator* UnaryNot() { UNOP(JSUnaryNot); }
+ const Operator* ToBoolean() { UNOP(JSToBoolean); }
+ const Operator* ToNumber() { UNOP(JSToNumber); }
+ const Operator* ToString() { UNOP(JSToString); }
+ const Operator* ToName() { UNOP(JSToName); }
+ const Operator* ToObject() { UNOP(JSToObject); }
+ const Operator* Yield() { UNOP(JSYield); }
+
+ const Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
+
+ const Operator* Call(int arguments, CallFunctionFlags flags) {
CallParameters parameters = {arguments, flags};
OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
arguments, 1);
}
- Operator* CallNew(int arguments) {
+ const Operator* CallNew(int arguments) {
return new (zone_)
Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
arguments, 1, "JSCallConstruct", arguments);
}
- Operator* LoadProperty() { BINOP(JSLoadProperty); }
- Operator* LoadNamed(PrintableUnique<Name> name,
- ContextualMode contextual_mode = NOT_CONTEXTUAL) {
+ const Operator* LoadProperty() { BINOP(JSLoadProperty); }
+ const Operator* LoadNamed(Unique<Name> name,
+ ContextualMode contextual_mode = NOT_CONTEXTUAL) {
LoadNamedParameters parameters = {name, contextual_mode};
OP1(JSLoadNamed, LoadNamedParameters, parameters, Operator::kNoProperties,
1, 1);
}
- Operator* StoreProperty() { NOPROPS(JSStoreProperty, 3, 0); }
- Operator* StoreNamed(PrintableUnique<Name> name) {
- OP1(JSStoreNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 2,
+ const Operator* StoreProperty(StrictMode strict_mode) {
+ OP1(JSStoreProperty, StrictMode, strict_mode, Operator::kNoProperties, 3,
0);
}
- Operator* DeleteProperty(StrictMode strict_mode) {
+ const Operator* StoreNamed(StrictMode strict_mode, Unique<Name> name) {
+ StoreNamedParameters parameters = {strict_mode, name};
+ OP1(JSStoreNamed, StoreNamedParameters, parameters, Operator::kNoProperties,
+ 2, 0);
+ }
+
+ const Operator* DeleteProperty(StrictMode strict_mode) {
OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
1);
}
- Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
+ const Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
- Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
+ const Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
ContextAccess access(depth, index, immutable);
OP1(JSLoadContext, ContextAccess, access,
Operator::kEliminatable | Operator::kNoWrite, 1, 1);
}
- Operator* StoreContext(uint16_t depth, uint32_t index) {
+ const Operator* StoreContext(uint16_t depth, uint32_t index) {
ContextAccess access(depth, index, false);
- OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 1);
+ OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 0);
}
- Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
- Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
- Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
+ const Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
+ const Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
+ const Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
// TODO(titzer): nail down the static parts of each of these context flavors.
- Operator* CreateFunctionContext() { NOPROPS(JSCreateFunctionContext, 1, 1); }
- Operator* CreateCatchContext(PrintableUnique<String> name) {
- OP1(JSCreateCatchContext, PrintableUnique<String>, name,
- Operator::kNoProperties, 1, 1);
+ const Operator* CreateFunctionContext() {
+ NOPROPS(JSCreateFunctionContext, 1, 1);
+ }
+ const Operator* CreateCatchContext(Unique<String> name) {
+ OP1(JSCreateCatchContext, Unique<String>, name, Operator::kNoProperties, 1,
+ 1);
+ }
+ const Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
+ const Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
+ const Operator* CreateModuleContext() {
+ NOPROPS(JSCreateModuleContext, 2, 1);
+ }
+ const Operator* CreateGlobalContext() {
+ NOPROPS(JSCreateGlobalContext, 2, 1);
}
- Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
- Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
- Operator* CreateModuleContext() { NOPROPS(JSCreateModuleContext, 2, 1); }
- Operator* CreateGlobalContext() { NOPROPS(JSCreateGlobalContext, 2, 1); }
- Operator* Runtime(Runtime::FunctionId function, int arguments) {
+ const Operator* Runtime(Runtime::FunctionId function, int arguments) {
const Runtime::Function* f = Runtime::FunctionForId(function);
DCHECK(f->nargs == -1 || f->nargs == arguments);
OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
@@ -207,8 +225,9 @@ struct StaticParameterTraits<Runtime::FunctionId> {
return a == b;
}
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_JS_OPERATOR_H_
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 361cb94f05..39104bb50d 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/access-builder.h"
#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties-inl.h"
@@ -17,40 +19,22 @@ namespace compiler {
// - relax effects from generic but not-side-effecting operations
// - relax effects for ToNumber(mixed)
-// Replace value uses of {node} with {value} and effect uses of {node} with
-// {effect}. If {effect == NULL}, then use the effect input to {node}.
-// TODO(titzer): move into a GraphEditor?
-static void ReplaceUses(Node* node, Node* value, Node* effect) {
- if (value == effect) {
- // Effect and value updates are the same; no special iteration needed.
- if (value != node) node->ReplaceUses(value);
- return;
- }
-
- if (effect == NULL) effect = NodeProperties::GetEffectInput(node);
-
- // The iteration requires distinguishing between value and effect edges.
- UseIter iter = node->uses().begin();
- while (iter != node->uses().end()) {
- if (NodeProperties::IsEffectEdge(iter.edge())) {
- iter = iter.UpdateToAndIncrement(effect);
- } else {
- iter = iter.UpdateToAndIncrement(value);
- }
- }
-}
-
// Relax the effects of {node} by immediately replacing effect uses of {node}
// with the effect input to {node}.
// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
// TODO(titzer): move into a GraphEditor?
-static void RelaxEffects(Node* node) { ReplaceUses(node, node, NULL); }
+static void RelaxEffects(Node* node) {
+ NodeProperties::ReplaceWithValue(node, node, NULL);
+}
+
+
+JSTypedLowering::~JSTypedLowering() {}
Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
- ReplaceUses(old, node, node);
- return Reducer::Changed(node);
+ NodeProperties::ReplaceWithValue(old, node, node);
+ return Changed(node);
}
@@ -99,7 +83,7 @@ class JSBinopReduction {
// Remove all effect and control inputs and outputs to this node and change
// to the pure operator {op}, possibly inserting a boolean inversion.
- Reduction ChangeToPureOperator(Operator* op, bool invert = false) {
+ Reduction ChangeToPureOperator(const Operator* op, bool invert = false) {
DCHECK_EQ(0, OperatorProperties::GetEffectInputCount(op));
DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
DCHECK_EQ(0, OperatorProperties::GetControlInputCount(op));
@@ -179,7 +163,7 @@ class JSBinopReduction {
return n;
}
- // Try to narrowing a double or number operation to an Int32 operation.
+ // Try narrowing a double or number operation to an Int32 operation.
bool TryNarrowingToI32(Type* type, Node* node) {
switch (node->opcode()) {
case IrOpcode::kFloat64Add:
@@ -225,8 +209,8 @@ class JSBinopReduction {
if (input_type->Is(type)) return node; // already in the value range.
- Operator* op = is_signed ? simplified()->NumberToInt32()
- : simplified()->NumberToUint32();
+ const Operator* op = is_signed ? simplified()->NumberToInt32()
+ : simplified()->NumberToUint32();
Node* n = graph()->NewNode(op, node);
return n;
}
@@ -239,18 +223,35 @@ class JSBinopReduction {
Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
JSBinopReduction r(this, node);
+ if (r.BothInputsAre(Type::Number())) {
+ // JSAdd(x:number, y:number) => NumberAdd(x, y)
+ return r.ChangeToPureOperator(simplified()->NumberAdd());
+ }
+ Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+ if (r.NeitherInputCanBe(maybe_string)) {
+ // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(simplified()->NumberAdd());
+ }
+#if 0
+ // TODO(turbofan): Lowering of StringAdd is disabled for now because:
+ // a) The inserted ToString operation screws up valueOf vs. toString order.
+ // b) Deoptimization at ToString doesn't have corresponding bailout id.
+ // c) Our current StringAddStub is actually non-pure and requires context.
if (r.OneInputIs(Type::String())) {
+ // JSAdd(x:string, y:string) => StringAdd(x, y)
+ // JSAdd(x:string, y) => StringAdd(x, ToString(y))
+ // JSAdd(x, y:string) => StringAdd(ToString(x), y)
r.ConvertInputsToString();
return r.ChangeToPureOperator(simplified()->StringAdd());
- } else if (r.NeitherInputCanBe(Type::String())) {
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(simplified()->NumberAdd());
}
+#endif
return NoChange();
}
-Reduction JSTypedLowering::ReduceNumberBinop(Node* node, Operator* numberOp) {
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
+ const Operator* numberOp) {
JSBinopReduction r(this, node);
if (r.OneInputIs(Type::Primitive())) {
// If at least one input is a primitive, then insert appropriate conversions
@@ -265,7 +266,8 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node, Operator* numberOp) {
Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
- bool right_signed, Operator* intOp) {
+ bool right_signed,
+ const Operator* intOp) {
JSBinopReduction r(this, node);
// TODO(titzer): some Smi bitwise operations don't really require going
// all the way to int32, which can save tagging/untagging for some operations
@@ -277,7 +279,7 @@ Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
- Operator* shift_op) {
+ const Operator* shift_op) {
JSBinopReduction r(this, node);
r.ConvertInputsForShift(left_signed);
return r.ChangeToPureOperator(shift_op);
@@ -288,7 +290,7 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
// If both inputs are definitely strings, perform a string comparison.
- Operator* stringOp;
+ const Operator* stringOp;
switch (node->opcode()) {
case IrOpcode::kJSLessThan:
stringOp = simplified()->StringLessThan();
@@ -308,10 +310,12 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
return NoChange();
}
return r.ChangeToPureOperator(stringOp);
- } else if (r.OneInputCannotBe(Type::String())) {
+ }
+ Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+ if (r.OneInputCannotBe(maybe_string)) {
// If one input cannot be a string, then emit a number comparison.
- Operator* less_than;
- Operator* less_than_or_equal;
+ const Operator* less_than;
+ const Operator* less_than_or_equal;
if (r.BothInputsAre(Type::Unsigned32())) {
less_than = machine()->Uint32LessThan();
less_than_or_equal = machine()->Uint32LessThanOrEqual();
@@ -324,7 +328,7 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
}
- Operator* comparison;
+ const Operator* comparison;
switch (node->opcode()) {
case IrOpcode::kJSLessThan:
comparison = less_than;
@@ -430,7 +434,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
}
Type* input_type = NodeProperties::GetBounds(input).upper;
if (input_type->Is(Type::Number())) {
- // JSToNumber(number) => x
+ // JSToNumber(x:number) => x
return Changed(input);
}
if (input_type->Is(Type::Undefined())) {
@@ -441,8 +445,12 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
// JSToNumber(null) => #0
return ReplaceWith(jsgraph()->ZeroConstant());
}
- // TODO(turbofan): js-typed-lowering of ToNumber(boolean)
- // TODO(turbofan): js-typed-lowering of ToNumber(string)
+ if (input_type->Is(Type::Boolean())) {
+ // JSToNumber(x:boolean) => BooleanToNumber(x)
+ return ReplaceWith(
+ graph()->NewNode(simplified()->BooleanToNumber(), input));
+ }
+ // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
return NoChange();
}
@@ -459,7 +467,7 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
}
Type* input_type = NodeProperties::GetBounds(input).upper;
if (input_type->Is(Type::String())) {
- return Changed(input); // JSToString(string) => x
+ return Changed(input); // JSToString(x:string) => x
}
if (input_type->Is(Type::Undefined())) {
return ReplaceWith(jsgraph()->HeapConstant(
@@ -469,8 +477,8 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
return ReplaceWith(jsgraph()->HeapConstant(
graph()->zone()->isolate()->factory()->null_string()));
}
- // TODO(turbofan): js-typed-lowering of ToString(boolean)
- // TODO(turbofan): js-typed-lowering of ToString(number)
+ // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
+ // TODO(turbofan): js-typed-lowering of ToString(x:number)
return NoChange();
}
@@ -487,7 +495,7 @@ Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
}
Type* input_type = NodeProperties::GetBounds(input).upper;
if (input_type->Is(Type::Boolean())) {
- return Changed(input); // JSToBoolean(boolean) => x
+ return Changed(input); // JSToBoolean(x:boolean) => x
}
if (input_type->Is(Type::Undefined())) {
// JSToBoolean(undefined) => #false
@@ -498,31 +506,123 @@ Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
return ReplaceWith(jsgraph()->FalseConstant());
}
if (input_type->Is(Type::DetectableReceiver())) {
- // JSToBoolean(detectable) => #true
+ // JSToBoolean(x:detectable) => #true
return ReplaceWith(jsgraph()->TrueConstant());
}
if (input_type->Is(Type::Undetectable())) {
- // JSToBoolean(undetectable) => #false
+ // JSToBoolean(x:undetectable) => #false
return ReplaceWith(jsgraph()->FalseConstant());
}
- if (input_type->Is(Type::Number())) {
- // JSToBoolean(number) => BooleanNot(NumberEqual(x, #0))
+ if (input_type->Is(Type::OrderedNumber())) {
+ // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
jsgraph()->ZeroConstant());
Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
- ReplaceEagerly(input, inv);
- // TODO(titzer): Ugly. ReplaceEagerly smashes all uses. Smash it back here.
- cmp->ReplaceInput(0, input);
- return Changed(inv);
+ return ReplaceWith(inv);
}
// TODO(turbofan): js-typed-lowering of ToBoolean(string)
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
+ Node* key = NodeProperties::GetValueInput(node, 1);
+ Node* base = NodeProperties::GetValueInput(node, 0);
+ Type* key_type = NodeProperties::GetBounds(key).upper;
+ Type* base_type = NodeProperties::GetBounds(base).upper;
+ // TODO(mstarzinger): This lowering is not correct if:
+ // a) The typed array turns external (i.e. MaterializeArrayBuffer)
+ // b) The typed array or it's buffer is neutered.
+ // c) The index is out of bounds.
+ if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
+ base_type->AsConstant()->Value()->IsJSTypedArray()) {
+ // JSLoadProperty(typed-array, int32)
+ JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
+ ElementsKind elements_kind = array->map()->elements_kind();
+ ExternalArrayType type = array->type();
+ uint32_t length;
+ CHECK(array->length()->ToUint32(&length));
+ ElementAccess element_access;
+ Node* elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
+ NodeProperties::GetEffectInput(node));
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
+ elements, NodeProperties::GetEffectInput(node));
+ element_access = AccessBuilder::ForTypedArrayElement(type, true);
+ } else {
+ DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+ element_access = AccessBuilder::ForTypedArrayElement(type, false);
+ }
+ Node* value =
+ graph()->NewNode(simplified()->LoadElement(element_access), elements,
+ key, jsgraph()->Uint32Constant(length),
+ NodeProperties::GetEffectInput(node));
+ return ReplaceEagerly(node, value);
+ }
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
+ Node* key = NodeProperties::GetValueInput(node, 1);
+ Node* base = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Type* key_type = NodeProperties::GetBounds(key).upper;
+ Type* base_type = NodeProperties::GetBounds(base).upper;
+ // TODO(mstarzinger): This lowering is not correct if:
+ // a) The typed array turns external (i.e. MaterializeArrayBuffer)
+ // b) The typed array or its buffer is neutered.
+ if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
+ base_type->AsConstant()->Value()->IsJSTypedArray()) {
+ // JSStoreProperty(typed-array, int32, value)
+ JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
+ ElementsKind elements_kind = array->map()->elements_kind();
+ ExternalArrayType type = array->type();
+ uint32_t length;
+ CHECK(array->length()->ToUint32(&length));
+ ElementAccess element_access;
+ Node* elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
+ NodeProperties::GetEffectInput(node));
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
+ elements, NodeProperties::GetEffectInput(node));
+ element_access = AccessBuilder::ForTypedArrayElement(type, true);
+ } else {
+ DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+ element_access = AccessBuilder::ForTypedArrayElement(type, false);
+ }
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
+ jsgraph()->Uint32Constant(length));
+ Node* branch = graph()->NewNode(common()->Branch(), check,
+ NodeProperties::GetControlInput(node));
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* store =
+ graph()->NewNode(simplified()->StoreElement(element_access), elements,
+ key, jsgraph()->Uint32Constant(length), value,
+ NodeProperties::GetEffectInput(node), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
+ NodeProperties::GetEffectInput(node), merge);
+
+ return ReplaceWith(phi);
+ }
+ return NoChange();
+}
+
+
static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
if (reduction.Changed()) {
- ReplaceUses(node, reduction.replacement(), NULL);
+ NodeProperties::ReplaceWithValue(node, reduction.replacement());
return reduction;
}
return Reducer::NoChange();
@@ -570,19 +670,19 @@ Reduction JSTypedLowering::Reduce(Node* node) {
Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
Node* value;
if (result.Changed()) {
- // !x => BooleanNot(x)
+ // JSUnaryNot(x:boolean) => BooleanNot(x)
value =
graph()->NewNode(simplified()->BooleanNot(), result.replacement());
- ReplaceUses(node, value, NULL);
+ NodeProperties::ReplaceWithValue(node, value);
return Changed(value);
} else {
- // !x => BooleanNot(JSToBoolean(x))
+ // JSUnaryNot(x) => BooleanNot(JSToBoolean(x))
value = graph()->NewNode(simplified()->BooleanNot(), node);
node->set_op(javascript()->ToBoolean());
- ReplaceUses(node, value, node);
+ NodeProperties::ReplaceWithValue(node, value, node);
// Note: ReplaceUses() smashes all uses, so smash it back here.
value->ReplaceInput(0, node);
- return ReplaceWith(value);
+ return Changed(node);
}
}
case IrOpcode::kJSToBoolean:
@@ -594,11 +694,18 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSToString:
return ReplaceWithReduction(node,
ReduceJSToStringInput(node->InputAt(0)));
+ case IrOpcode::kJSLoadProperty:
+ return ReduceJSLoadProperty(node);
+ case IrOpcode::kJSStoreProperty:
+ return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSCallFunction:
+ return JSBuiltinReducer(jsgraph()).Reduce(node);
default:
break;
}
return NoChange();
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index c69fc2736a..deaf1fa878 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_OPERATOR_REDUCERS_H_
-#define V8_COMPILER_OPERATOR_REDUCERS_H_
+#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
+#define V8_COMPILER_JS_TYPED_LOWERING_H_
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/lowering-builder.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
@@ -17,51 +16,49 @@ namespace internal {
namespace compiler {
// Lowers JS-level operators to simplified operators based on types.
-class JSTypedLowering : public LoweringBuilder {
+class JSTypedLowering FINAL : public Reducer {
public:
- explicit JSTypedLowering(JSGraph* jsgraph,
- SourcePositionTable* source_positions)
- : LoweringBuilder(jsgraph->graph(), source_positions),
- jsgraph_(jsgraph),
- simplified_(jsgraph->zone()),
- machine_(jsgraph->zone()) {}
- virtual ~JSTypedLowering() {}
+ explicit JSTypedLowering(JSGraph* jsgraph)
+ : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+ virtual ~JSTypedLowering();
- Reduction Reduce(Node* node);
- virtual void Lower(Node* node) { Reduce(node); }
+ virtual Reduction Reduce(Node* node) OVERRIDE;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
+ Zone* zone() { return jsgraph_->zone(); }
private:
friend class JSBinopReduction;
- JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
- MachineOperatorBuilder machine_;
Reduction ReplaceEagerly(Node* old, Node* node);
- Reduction NoChange() { return Reducer::NoChange(); }
Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
- Reduction Changed(Node* node) { return Reducer::Changed(node); }
Reduction ReduceJSAdd(Node* node);
Reduction ReduceJSComparison(Node* node);
+ Reduction ReduceJSLoadProperty(Node* node);
+ Reduction ReduceJSStoreProperty(Node* node);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToBooleanInput(Node* input);
- Reduction ReduceNumberBinop(Node* node, Operator* numberOp);
+ Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
- Operator* intOp);
- Reduction ReduceI32Shift(Node* node, bool left_signed, Operator* shift_op);
+ const Operator* intOp);
+ Reduction ReduceI32Shift(Node* node, bool left_signed,
+ const Operator* shift_op);
JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
CommonOperatorBuilder* common() { return jsgraph_->common(); }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- MachineOperatorBuilder* machine() { return &machine_; }
+ MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+
+ JSGraph* jsgraph_;
+ SimplifiedOperatorBuilder simplified_;
};
-}
-}
-} // namespace v8::internal::compiler
-#endif // V8_COMPILER_OPERATOR_REDUCERS_H_
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_TYPED_LOWERING_H_
diff --git a/deps/v8/src/compiler/linkage-impl.h b/deps/v8/src/compiler/linkage-impl.h
index e7aafc3885..c32c7066fd 100644
--- a/deps/v8/src/compiler/linkage-impl.h
+++ b/deps/v8/src/compiler/linkage-impl.h
@@ -9,198 +9,218 @@ namespace v8 {
namespace internal {
namespace compiler {
+// TODO(titzer): replace uses of int with size_t in LinkageHelper.
+template <typename LinkageTraits>
class LinkageHelper {
public:
- static LinkageLocation TaggedStackSlot(int index) {
- DCHECK(index < 0);
- return LinkageLocation(kMachineTagged, index);
- }
-
- static LinkageLocation TaggedRegisterLocation(Register reg) {
- return LinkageLocation(kMachineTagged, Register::ToAllocationIndex(reg));
- }
-
- static inline LinkageLocation WordRegisterLocation(Register reg) {
- return LinkageLocation(MachineOperatorBuilder::pointer_rep(),
- Register::ToAllocationIndex(reg));
- }
+ static const RegList kNoCalleeSaved = 0;
- static LinkageLocation UnconstrainedRegister(MachineType rep) {
- return LinkageLocation(rep, LinkageLocation::ANY_REGISTER);
+ static void AddReturnLocations(LocationSignature::Builder* locations) {
+ DCHECK(locations->return_count_ <= 2);
+ if (locations->return_count_ > 0) {
+ locations->AddReturn(regloc(LinkageTraits::ReturnValueReg()));
+ }
+ if (locations->return_count_ > 1) {
+ locations->AddReturn(regloc(LinkageTraits::ReturnValue2Reg()));
+ }
}
- static const RegList kNoCalleeSaved = 0;
-
// TODO(turbofan): cache call descriptors for JSFunction calls.
- template <typename LinkageTraits>
- static CallDescriptor* GetJSCallDescriptor(Zone* zone, int parameter_count) {
- const int jsfunction_count = 1;
- const int context_count = 1;
- int input_count = jsfunction_count + parameter_count + context_count;
-
- const int return_count = 1;
- LinkageLocation* locations =
- zone->NewArray<LinkageLocation>(return_count + input_count);
-
- int index = 0;
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::JSCallFunctionReg());
+ static CallDescriptor* GetJSCallDescriptor(Zone* zone,
+ int js_parameter_count) {
+ const size_t return_count = 1;
+ const size_t context_count = 1;
+ const size_t parameter_count = js_parameter_count + context_count;
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // Add returns.
+ AddReturnLocations(&locations);
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(kMachAnyTagged);
+ }
- for (int i = 0; i < parameter_count; i++) {
- // All parameters to JS calls go on the stack.
- int spill_slot_index = i - parameter_count;
- locations[index++] = TaggedStackSlot(spill_slot_index);
+ // All parameters to JS calls go on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ int spill_slot_index = i - js_parameter_count;
+ locations.AddParam(stackloc(spill_slot_index));
+ types.AddParam(kMachAnyTagged);
}
- locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
-
- // TODO(titzer): refactor TurboFan graph to consider context a value input.
- return new (zone)
- CallDescriptor(CallDescriptor::kCallJSFunction, // kind
- return_count, // return_count
- parameter_count, // parameter_count
- input_count - context_count, // input_count
- locations, // locations
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved registers
- CallDescriptor::kCanDeoptimize); // deoptimization
+ // Add context.
+ locations.AddParam(regloc(LinkageTraits::ContextReg()));
+ types.AddParam(kMachAnyTagged);
+
+ // The target for JS function calls is the JSFunction object.
+ MachineType target_type = kMachAnyTagged;
+ LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
+ return new (zone) CallDescriptor(CallDescriptor::kCallJSFunction, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // js_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved
+ CallDescriptor::kNeedsFrameState, // flags
+ "js-call");
}
// TODO(turbofan): cache call descriptors for runtime calls.
- template <typename LinkageTraits>
static CallDescriptor* GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function_id, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize) {
- const int code_count = 1;
- const int function_count = 1;
- const int num_args_count = 1;
- const int context_count = 1;
- const int input_count = code_count + parameter_count + function_count +
- num_args_count + context_count;
+ Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
+ Operator::Properties properties) {
+ const size_t function_count = 1;
+ const size_t num_args_count = 1;
+ const size_t context_count = 1;
+ const size_t parameter_count = function_count +
+ static_cast<size_t>(js_parameter_count) +
+ num_args_count + context_count;
const Runtime::Function* function = Runtime::FunctionForId(function_id);
- const int return_count = function->result_size;
- LinkageLocation* locations =
- zone->NewArray<LinkageLocation>(return_count + input_count);
-
- int index = 0;
- if (return_count > 0) {
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+ const size_t return_count = static_cast<size_t>(function->result_size);
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // Add returns.
+ AddReturnLocations(&locations);
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(kMachAnyTagged);
}
- if (return_count > 1) {
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::ReturnValue2Reg());
+
+ // All parameters to the runtime call go on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ locations.AddParam(stackloc(i - js_parameter_count));
+ types.AddParam(kMachAnyTagged);
}
+ // Add runtime function itself.
+ locations.AddParam(regloc(LinkageTraits::RuntimeCallFunctionReg()));
+ types.AddParam(kMachAnyTagged);
- DCHECK_LE(return_count, 2);
+ // Add runtime call argument count.
+ locations.AddParam(regloc(LinkageTraits::RuntimeCallArgCountReg()));
+ types.AddParam(kMachPtr);
- locations[index++] = UnconstrainedRegister(kMachineTagged); // CEntryStub
+ // Add context.
+ locations.AddParam(regloc(LinkageTraits::ContextReg()));
+ types.AddParam(kMachAnyTagged);
- for (int i = 0; i < parameter_count; i++) {
- // All parameters to runtime calls go on the stack.
- int spill_slot_index = i - parameter_count;
- locations[index++] = TaggedStackSlot(spill_slot_index);
- }
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::RuntimeCallFunctionReg());
- locations[index++] =
- WordRegisterLocation(LinkageTraits::RuntimeCallArgCountReg());
- locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+ CallDescriptor::Flags flags = Linkage::NeedsFrameState(function_id)
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
- // TODO(titzer): refactor TurboFan graph to consider context a value input.
+ // The target for runtime calls is a code object.
+ MachineType target_type = kMachAnyTagged;
+ LinkageLocation target_loc = LinkageLocation::AnyRegister();
return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject, // kind
- return_count, // return_count
- parameter_count, // parameter_count
- input_count, // input_count
- locations, // locations
- properties, // properties
- kNoCalleeSaved, // callee-saved registers
- can_deoptimize, // deoptimization
- function->name);
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // js_parameter_count
+ properties, // properties
+ kNoCalleeSaved, // callee-saved
+ flags, // flags
+ function->name); // debug name
}
// TODO(turbofan): cache call descriptors for code stub calls.
- template <typename LinkageTraits>
static CallDescriptor* GetStubCallDescriptor(
- Zone* zone, CodeStubInterfaceDescriptor* descriptor,
- int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize) {
- int register_parameter_count = descriptor->GetEnvironmentParameterCount();
- int parameter_count = register_parameter_count + stack_parameter_count;
- const int code_count = 1;
+ Zone* zone, CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags) {
+ const int register_parameter_count =
+ descriptor.GetEnvironmentParameterCount();
+ const int js_parameter_count =
+ register_parameter_count + stack_parameter_count;
const int context_count = 1;
- int input_count = code_count + parameter_count + context_count;
+ const size_t return_count = 1;
+ const size_t parameter_count =
+ static_cast<size_t>(js_parameter_count + context_count);
- const int return_count = 1;
- LinkageLocation* locations =
- zone->NewArray<LinkageLocation>(return_count + input_count);
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
- int index = 0;
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
- locations[index++] = UnconstrainedRegister(kMachineTagged); // code
- for (int i = 0; i < parameter_count; i++) {
+ // Add return location.
+ AddReturnLocations(&locations);
+ types.AddReturn(kMachAnyTagged);
+
+ // Add parameters in registers and on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
if (i < register_parameter_count) {
- // The first parameters to code stub calls go in registers.
- Register reg = descriptor->GetEnvironmentParameterRegister(i);
- locations[index++] = TaggedRegisterLocation(reg);
+ // The first parameters go in registers.
+ Register reg = descriptor.GetEnvironmentParameterRegister(i);
+ locations.AddParam(regloc(reg));
} else {
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
- locations[index++] = TaggedStackSlot(stack_slot);
+ locations.AddParam(stackloc(stack_slot));
}
+ types.AddParam(kMachAnyTagged);
}
- locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
-
- // TODO(titzer): refactor TurboFan graph to consider context a value input.
- return new (zone)
- CallDescriptor(CallDescriptor::kCallCodeObject, // kind
- return_count, // return_count
- parameter_count, // parameter_count
- input_count, // input_count
- locations, // locations
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved registers
- can_deoptimize, // deoptimization
- CodeStub::MajorName(descriptor->MajorKey(), false));
+ // Add context.
+ locations.AddParam(regloc(LinkageTraits::ContextReg()));
+ types.AddParam(kMachAnyTagged);
+
+ // The target for stub calls is a code object.
+ MachineType target_type = kMachAnyTagged;
+ LinkageLocation target_loc = LinkageLocation::AnyRegister();
+ return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // js_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ flags, // flags
+ descriptor.DebugName(zone->isolate()));
}
+ static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* msig) {
+ LocationSignature::Builder locations(zone, msig->return_count(),
+ msig->parameter_count());
+ // Add return location(s).
+ AddReturnLocations(&locations);
- template <typename LinkageTraits>
- static CallDescriptor* GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types) {
- LinkageLocation* locations =
- zone->NewArray<LinkageLocation>(num_params + 2);
- int index = 0;
- locations[index++] =
- TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
- locations[index++] = LinkageHelper::UnconstrainedRegister(
- MachineOperatorBuilder::pointer_rep());
- // TODO(dcarney): test with lots of parameters.
- int i = 0;
- for (; i < LinkageTraits::CRegisterParametersLength() && i < num_params;
- i++) {
- locations[index++] = LinkageLocation(
- param_types[i],
- Register::ToAllocationIndex(LinkageTraits::CRegisterParameter(i)));
- }
- for (; i < num_params; i++) {
- locations[index++] = LinkageLocation(param_types[i], -1 - i);
+ // Add register and/or stack parameter(s).
+ const int parameter_count = static_cast<int>(msig->parameter_count());
+ for (int i = 0; i < parameter_count; i++) {
+ if (i < LinkageTraits::CRegisterParametersLength()) {
+ locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
+ } else {
+ locations.AddParam(stackloc(-1 - i));
+ }
}
- return new (zone) CallDescriptor(
- CallDescriptor::kCallAddress, 1, num_params, num_params + 1, locations,
- Operator::kNoProperties, LinkageTraits::CCalleeSaveRegisters(),
- CallDescriptor::kCannotDeoptimize); // TODO(jarin) should deoptimize!
+
+ // The target for C calls is always an address (i.e. machine pointer).
+ MachineType target_type = kMachPtr;
+ LinkageLocation target_loc = LinkageLocation::AnyRegister();
+ return new (zone) CallDescriptor(CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig, // machine_sig
+ locations.Build(), // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ LinkageTraits::CCalleeSaveRegisters(),
+ CallDescriptor::kNoFlags, "c-call");
+ }
+
+ static LinkageLocation regloc(Register reg) {
+ return LinkageLocation(Register::ToAllocationIndex(reg));
+ }
+
+ static LinkageLocation stackloc(int i) {
+ DCHECK_LT(i, 0);
+ return LinkageLocation(i);
}
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_LINKAGE_IMPL_H_
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 26a3dccc47..465a6679e0 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -34,8 +34,8 @@ OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
OStream& operator<<(OStream& os, const CallDescriptor& d) {
// TODO(svenpanne) Output properties etc. and be less cryptic.
return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
- << "p" << d.ParameterCount() << "i" << d.InputCount()
- << (d.CanLazilyDeoptimize() ? "deopt" : "");
+ << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
+ << d.FrameStateCount();
}
@@ -51,9 +51,8 @@ Linkage::Linkage(CompilationInfo* info) : info_(info) {
incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
} else if (info->code_stub() != NULL) {
// Use the code stub interface descriptor.
- HydrogenCodeStub* stub = info->code_stub();
- CodeStubInterfaceDescriptor* descriptor =
- info_->isolate()->code_stub_interface_descriptor(stub->MajorKey());
+ CallInterfaceDescriptor descriptor =
+ info->code_stub()->GetCallInterfaceDescriptor();
incoming_ = GetStubCallDescriptor(descriptor);
} else {
incoming_ = NULL; // TODO(titzer): ?
@@ -95,18 +94,42 @@ CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize) {
+ Operator::Properties properties) {
return GetRuntimeCallDescriptor(function, parameter_count, properties,
- can_deoptimize, this->info_->zone());
+ this->info_->zone());
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize) {
- return GetStubCallDescriptor(descriptor, stack_parameter_count,
- can_deoptimize, this->info_->zone());
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags) {
+ return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
+ this->info_->zone());
+}
+
+
+// static
+bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
+ if (!FLAG_turbo_deoptimization) {
+ return false;
+ }
+ // TODO(jarin) At the moment, we only add frame state for
+ // few chosen runtime functions.
+ switch (function) {
+ case Runtime::kDebugBreak:
+ case Runtime::kDebugGetLoadedScripts:
+ case Runtime::kDeoptimizeFunction:
+ case Runtime::kInlineCallFunction:
+ case Runtime::kPrepareStep:
+ case Runtime::kSetScriptBreakPoint:
+ case Runtime::kStackGuard:
+ case Runtime::kCheckExecutionState:
+ case Runtime::kDebugEvaluate:
+ case Runtime::kCollectStackTrace:
+ return true;
+ default:
+ return false;
+ }
}
@@ -122,24 +145,22 @@ CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ Operator::Properties properties, Zone* zone) {
UNIMPLEMENTED();
return NULL;
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone) {
UNIMPLEMENTED();
return NULL;
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types) {
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
UNIMPLEMENTED();
return NULL;
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 9fe02183ec..c5cef5e35f 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -5,11 +5,10 @@
#ifndef V8_COMPILER_LINKAGE_H_
#define V8_COMPILER_LINKAGE_H_
-#include "src/v8.h"
-
+#include "src/base/flags.h"
#include "src/code-stubs.h"
#include "src/compiler/frame.h"
-#include "src/compiler/machine-operator.h"
+#include "src/compiler/machine-type.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/zone.h"
@@ -19,80 +18,111 @@ namespace internal {
namespace compiler {
// Describes the location for a parameter or a return value to a call.
-// TODO(titzer): replace with Radium locations when they are ready.
class LinkageLocation {
public:
- LinkageLocation(MachineType rep, int location)
- : rep_(rep), location_(location) {}
-
- inline MachineType representation() const { return rep_; }
+ explicit LinkageLocation(int location) : location_(location) {}
static const int16_t ANY_REGISTER = 32767;
+ static LinkageLocation AnyRegister() { return LinkageLocation(ANY_REGISTER); }
+
private:
friend class CallDescriptor;
friend class OperandGenerator;
- MachineType rep_;
int16_t location_; // >= 0 implies register, otherwise stack slot.
};
+typedef Signature<LinkageLocation> LocationSignature;
-class CallDescriptor : public ZoneObject {
+// Describes a call to various parts of the compiler. Every call has the notion
+// of a "target", which is the first input to the call.
+class CallDescriptor FINAL : public ZoneObject {
public:
- // Describes whether the first parameter is a code object, a JSFunction,
- // or an address--all of which require different machine sequences to call.
- enum Kind { kCallCodeObject, kCallJSFunction, kCallAddress };
-
- enum DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize };
-
- CallDescriptor(Kind kind, int8_t return_count, int16_t parameter_count,
- int16_t input_count, LinkageLocation* locations,
- Operator::Property properties, RegList callee_saved_registers,
- DeoptimizationSupport deoptimization_support,
+ // Describes the kind of this call, which determines the target.
+ enum Kind {
+ kCallCodeObject, // target is a Code object
+ kCallJSFunction, // target is a JSFunction object
+ kCallAddress // target is a machine pointer
+ };
+
+ enum Flag {
+ // TODO(jarin) kLazyDeoptimization and kNeedsFrameState should be unified.
+ kNoFlags = 0u,
+ kNeedsFrameState = 1u << 0,
+ kPatchableCallSite = 1u << 1,
+ kNeedsNopAfterCall = 1u << 2,
+ kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
+ };
+ typedef base::Flags<Flag> Flags;
+
+ CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
+ MachineSignature* machine_sig, LocationSignature* location_sig,
+ size_t js_param_count, Operator::Properties properties,
+ RegList callee_saved_registers, Flags flags,
const char* debug_name = "")
: kind_(kind),
- return_count_(return_count),
- parameter_count_(parameter_count),
- input_count_(input_count),
- locations_(locations),
+ target_type_(target_type),
+ target_loc_(target_loc),
+ machine_sig_(machine_sig),
+ location_sig_(location_sig),
+ js_param_count_(js_param_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
- deoptimization_support_(deoptimization_support),
- debug_name_(debug_name) {}
+ flags_(flags),
+ debug_name_(debug_name) {
+ DCHECK(machine_sig->return_count() == location_sig->return_count());
+ DCHECK(machine_sig->parameter_count() == location_sig->parameter_count());
+ }
+
// Returns the kind of this call.
Kind kind() const { return kind_; }
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
- // The number of return values from this call, usually 0 or 1.
- int ReturnCount() const { return return_count_; }
+ // The number of return values from this call.
+ size_t ReturnCount() const { return machine_sig_->return_count(); }
+
+ // The number of JavaScript parameters to this call, including the receiver
+ // object.
+ size_t JSParameterCount() const { return js_param_count_; }
+
+ // The total number of inputs to this call, which includes the target,
+ // receiver, context, etc.
+ // TODO(titzer): this should input the framestate input too.
+ size_t InputCount() const { return 1 + machine_sig_->parameter_count(); }
+
+ size_t FrameStateCount() const { return NeedsFrameState() ? 1 : 0; }
- // The number of JavaScript parameters to this call, including receiver,
- // but not the context.
- int ParameterCount() const { return parameter_count_; }
+ Flags flags() const { return flags_; }
- int InputCount() const { return input_count_; }
+ bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
- bool CanLazilyDeoptimize() const {
- return deoptimization_support_ == kCanDeoptimize;
+ LinkageLocation GetReturnLocation(size_t index) const {
+ return location_sig_->GetReturn(index);
}
- LinkageLocation GetReturnLocation(int index) {
- DCHECK(index < return_count_);
- return locations_[0 + index]; // return locations start at 0.
+ LinkageLocation GetInputLocation(size_t index) const {
+ if (index == 0) return target_loc_;
+ return location_sig_->GetParam(index - 1);
}
- LinkageLocation GetInputLocation(int index) {
- DCHECK(index < input_count_ + 1); // input_count + 1 is the context.
- return locations_[return_count_ + index]; // inputs start after returns.
+ const MachineSignature* GetMachineSignature() const { return machine_sig_; }
+
+ MachineType GetReturnType(size_t index) const {
+ return machine_sig_->GetReturn(index);
+ }
+
+ MachineType GetInputType(size_t index) const {
+ if (index == 0) return target_type_;
+ return machine_sig_->GetParam(index - 1);
}
// Operator properties describe how this call can be optimized, if at all.
- Operator::Property properties() const { return properties_; }
+ Operator::Properties properties() const { return properties_; }
// Get the callee-saved registers, if any, across this call.
- RegList CalleeSavedRegisters() { return callee_saved_registers_; }
+ RegList CalleeSavedRegisters() const { return callee_saved_registers_; }
const char* debug_name() const { return debug_name_; }
@@ -100,16 +130,19 @@ class CallDescriptor : public ZoneObject {
friend class Linkage;
Kind kind_;
- int8_t return_count_;
- int16_t parameter_count_;
- int16_t input_count_;
- LinkageLocation* locations_;
- Operator::Property properties_;
+ MachineType target_type_;
+ LinkageLocation target_loc_;
+ MachineSignature* machine_sig_;
+ LocationSignature* location_sig_;
+ size_t js_param_count_;
+ Operator::Properties properties_;
RegList callee_saved_registers_;
- DeoptimizationSupport deoptimization_support_;
+ Flags flags_;
const char* debug_name_;
};
+DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
+
OStream& operator<<(OStream& os, const CallDescriptor& d);
OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);
@@ -137,35 +170,35 @@ class Linkage : public ZoneObject {
CallDescriptor* GetIncomingDescriptor() { return incoming_; }
CallDescriptor* GetJSCallDescriptor(int parameter_count);
static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
- CallDescriptor* GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize =
- CallDescriptor::kCannotDeoptimize);
+ CallDescriptor* GetRuntimeCallDescriptor(Runtime::FunctionId function,
+ int parameter_count,
+ Operator::Properties properties);
static CallDescriptor* GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
+ Operator::Properties properties, Zone* zone);
CallDescriptor* GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count = 0,
- CallDescriptor::DeoptimizationSupport can_deoptimize =
- CallDescriptor::kCannotDeoptimize);
+ CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags);
static CallDescriptor* GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone);
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
// integers and pointers of one word size each, i.e. no floating point,
// structs, pointers to members, etc.
- static CallDescriptor* GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types);
+ static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig);
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) {
- return incoming_->GetInputLocation(index + 1);
+ return incoming_->GetInputLocation(index + 1); // + 1 to skip target.
+ }
+
+ // Get the machine type of an (incoming) parameter to this function.
+ MachineType GetParameterType(int index) {
+ return incoming_->GetInputType(index + 1); // + 1 to skip target.
}
// Get the location where this function should place its return value.
@@ -173,6 +206,9 @@ class Linkage : public ZoneObject {
return incoming_->GetReturnLocation(0);
}
+ // Get the machine type of this function's return value.
+ MachineType GetReturnType() { return incoming_->GetReturnType(0); }
+
// Get the frame offset for a given spill slot. The location depends on the
// calling convention and the specific frame layout, and may thus be
// architecture-specific. Negative spill slots indicate arguments on the
@@ -182,12 +218,15 @@ class Linkage : public ZoneObject {
CompilationInfo* info() const { return info_; }
+ static bool NeedsFrameState(Runtime::FunctionId function);
+
private:
CompilationInfo* info_;
CallDescriptor* incoming_;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_LINKAGE_H_
diff --git a/deps/v8/src/compiler/lowering-builder.cc b/deps/v8/src/compiler/lowering-builder.cc
deleted file mode 100644
index 1246f54f14..0000000000
--- a/deps/v8/src/compiler/lowering-builder.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-inl.h"
-#include "src/compiler/lowering-builder.h"
-#include "src/compiler/node-aux-data-inl.h"
-#include "src/compiler/node-properties-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class LoweringBuilder::NodeVisitor : public NullNodeVisitor {
- public:
- explicit NodeVisitor(LoweringBuilder* lowering) : lowering_(lowering) {}
-
- GenericGraphVisit::Control Post(Node* node) {
- if (lowering_->source_positions_ != NULL) {
- SourcePositionTable::Scope pos(lowering_->source_positions_, node);
- lowering_->Lower(node);
- } else {
- lowering_->Lower(node);
- }
- return GenericGraphVisit::CONTINUE;
- }
-
- private:
- LoweringBuilder* lowering_;
-};
-
-
-LoweringBuilder::LoweringBuilder(Graph* graph,
- SourcePositionTable* source_positions)
- : graph_(graph), source_positions_(source_positions) {}
-
-
-void LoweringBuilder::LowerAllNodes() {
- NodeVisitor visitor(this);
- graph()->VisitNodeInputsFromEnd(&visitor);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/lowering-builder.h b/deps/v8/src/compiler/lowering-builder.h
deleted file mode 100644
index aeaaaacfd9..0000000000
--- a/deps/v8/src/compiler/lowering-builder.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_LOWERING_BUILDER_H_
-#define V8_COMPILER_LOWERING_BUILDER_H_
-
-#include "src/v8.h"
-
-#include "src/compiler/graph.h"
-
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// TODO(dcarney): rename this class.
-class LoweringBuilder {
- public:
- explicit LoweringBuilder(Graph* graph, SourcePositionTable* source_positions);
- virtual ~LoweringBuilder() {}
-
- void LowerAllNodes();
- virtual void Lower(Node* node) = 0; // Exposed for testing.
-
- Graph* graph() const { return graph_; }
-
- private:
- class NodeVisitor;
- Graph* graph_;
- SourcePositionTable* source_positions_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_LOWERING_BUILDER_H_
diff --git a/deps/v8/src/compiler/machine-node-factory.h b/deps/v8/src/compiler/machine-node-factory.h
deleted file mode 100644
index faee93ebb2..0000000000
--- a/deps/v8/src/compiler/machine-node-factory.h
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_MACHINE_NODE_FACTORY_H_
-#define V8_COMPILER_MACHINE_NODE_FACTORY_H_
-
-#ifdef USE_SIMULATOR
-#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
-#else
-#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
-#endif
-
-#include "src/v8.h"
-
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class MachineCallDescriptorBuilder : public ZoneObject {
- public:
- MachineCallDescriptorBuilder(MachineType return_type, int parameter_count,
- const MachineType* parameter_types)
- : return_type_(return_type),
- parameter_count_(parameter_count),
- parameter_types_(parameter_types) {}
-
- int parameter_count() const { return parameter_count_; }
- const MachineType* parameter_types() const { return parameter_types_; }
-
- CallDescriptor* BuildCallDescriptor(Zone* zone) {
- return Linkage::GetSimplifiedCDescriptor(zone, parameter_count_,
- return_type_, parameter_types_);
- }
-
- private:
- const MachineType return_type_;
- const int parameter_count_;
- const MachineType* const parameter_types_;
-};
-
-
-#define ZONE() static_cast<NodeFactory*>(this)->zone()
-#define COMMON() static_cast<NodeFactory*>(this)->common()
-#define MACHINE() static_cast<NodeFactory*>(this)->machine()
-#define NEW_NODE_0(op) static_cast<NodeFactory*>(this)->NewNode(op)
-#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
-#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
-#define NEW_NODE_3(op, a, b, c) \
- static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)
-
-template <typename NodeFactory>
-class MachineNodeFactory {
- public:
- // Constants.
- Node* PointerConstant(void* value) {
- return IntPtrConstant(reinterpret_cast<intptr_t>(value));
- }
- Node* IntPtrConstant(intptr_t value) {
- // TODO(dcarney): mark generated code as unserializable if value != 0.
- return kPointerSize == 8 ? Int64Constant(value)
- : Int32Constant(static_cast<int>(value));
- }
- Node* Int32Constant(int32_t value) {
- return NEW_NODE_0(COMMON()->Int32Constant(value));
- }
- Node* Int64Constant(int64_t value) {
- return NEW_NODE_0(COMMON()->Int64Constant(value));
- }
- Node* NumberConstant(double value) {
- return NEW_NODE_0(COMMON()->NumberConstant(value));
- }
- Node* Float64Constant(double value) {
- return NEW_NODE_0(COMMON()->Float64Constant(value));
- }
- Node* HeapConstant(Handle<Object> object) {
- PrintableUnique<Object> val =
- PrintableUnique<Object>::CreateUninitialized(ZONE(), object);
- return NEW_NODE_0(COMMON()->HeapConstant(val));
- }
-
- Node* Projection(int index, Node* a) {
- return NEW_NODE_1(COMMON()->Projection(index), a);
- }
-
- // Memory Operations.
- Node* Load(MachineType rep, Node* base) {
- return Load(rep, base, Int32Constant(0));
- }
- Node* Load(MachineType rep, Node* base, Node* index) {
- return NEW_NODE_2(MACHINE()->Load(rep), base, index);
- }
- void Store(MachineType rep, Node* base, Node* value) {
- Store(rep, base, Int32Constant(0), value);
- }
- void Store(MachineType rep, Node* base, Node* index, Node* value) {
- NEW_NODE_3(MACHINE()->Store(rep, kNoWriteBarrier), base, index, value);
- }
- // Arithmetic Operations.
- Node* WordAnd(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordAnd(), a, b);
- }
- Node* WordOr(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordOr(), a, b);
- }
- Node* WordXor(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordXor(), a, b);
- }
- Node* WordShl(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordShl(), a, b);
- }
- Node* WordShr(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordShr(), a, b);
- }
- Node* WordSar(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordSar(), a, b);
- }
- Node* WordEqual(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->WordEqual(), a, b);
- }
- Node* WordNotEqual(Node* a, Node* b) {
- return WordBinaryNot(WordEqual(a, b));
- }
- Node* WordNot(Node* a) {
- if (MACHINE()->is32()) {
- return Word32Not(a);
- } else {
- return Word64Not(a);
- }
- }
- Node* WordBinaryNot(Node* a) {
- if (MACHINE()->is32()) {
- return Word32BinaryNot(a);
- } else {
- return Word64BinaryNot(a);
- }
- }
-
- Node* Word32And(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32And(), a, b);
- }
- Node* Word32Or(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32Or(), a, b);
- }
- Node* Word32Xor(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32Xor(), a, b);
- }
- Node* Word32Shl(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32Shl(), a, b);
- }
- Node* Word32Shr(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32Shr(), a, b);
- }
- Node* Word32Sar(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32Sar(), a, b);
- }
- Node* Word32Equal(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word32Equal(), a, b);
- }
- Node* Word32NotEqual(Node* a, Node* b) {
- return Word32BinaryNot(Word32Equal(a, b));
- }
- Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
- Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
-
- Node* Word64And(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64And(), a, b);
- }
- Node* Word64Or(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64Or(), a, b);
- }
- Node* Word64Xor(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64Xor(), a, b);
- }
- Node* Word64Shl(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64Shl(), a, b);
- }
- Node* Word64Shr(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64Shr(), a, b);
- }
- Node* Word64Sar(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64Sar(), a, b);
- }
- Node* Word64Equal(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Word64Equal(), a, b);
- }
- Node* Word64NotEqual(Node* a, Node* b) {
- return Word64BinaryNot(Word64Equal(a, b));
- }
- Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
- Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
-
- Node* Int32Add(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
- }
- Node* Int32AddWithOverflow(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
- }
- Node* Int32Sub(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
- }
- Node* Int32SubWithOverflow(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
- }
- Node* Int32Mul(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
- }
- Node* Int32Div(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32Div(), a, b);
- }
- Node* Int32UDiv(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32UDiv(), a, b);
- }
- Node* Int32Mod(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32Mod(), a, b);
- }
- Node* Int32UMod(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32UMod(), a, b);
- }
- Node* Int32LessThan(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32LessThan(), a, b);
- }
- Node* Int32LessThanOrEqual(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int32LessThanOrEqual(), a, b);
- }
- Node* Uint32LessThan(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Uint32LessThan(), a, b);
- }
- Node* Uint32LessThanOrEqual(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Uint32LessThanOrEqual(), a, b);
- }
- Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
- Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
- return Int32LessThanOrEqual(b, a);
- }
- Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
-
- Node* Int64Add(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64Add(), a, b);
- }
- Node* Int64Sub(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64Sub(), a, b);
- }
- Node* Int64Mul(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64Mul(), a, b);
- }
- Node* Int64Div(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64Div(), a, b);
- }
- Node* Int64UDiv(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64UDiv(), a, b);
- }
- Node* Int64Mod(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64Mod(), a, b);
- }
- Node* Int64UMod(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64UMod(), a, b);
- }
- Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
- Node* Int64LessThan(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64LessThan(), a, b);
- }
- Node* Int64LessThanOrEqual(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Int64LessThanOrEqual(), a, b);
- }
- Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
- Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
- return Int64LessThanOrEqual(b, a);
- }
-
- Node* ConvertIntPtrToInt32(Node* a) {
- return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a)
- : a;
- }
- Node* ConvertInt32ToIntPtr(Node* a) {
- return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a)
- : a;
- }
-
-#define INTPTR_BINOP(prefix, name) \
- Node* IntPtr##name(Node* a, Node* b) { \
- return kPointerSize == 8 ? prefix##64##name(a, b) \
- : prefix##32##name(a, b); \
- }
-
- INTPTR_BINOP(Int, Add);
- INTPTR_BINOP(Int, Sub);
- INTPTR_BINOP(Int, LessThan);
- INTPTR_BINOP(Int, LessThanOrEqual);
- INTPTR_BINOP(Word, Equal);
- INTPTR_BINOP(Word, NotEqual);
- INTPTR_BINOP(Int, GreaterThanOrEqual);
- INTPTR_BINOP(Int, GreaterThan);
-
-#undef INTPTR_BINOP
-
- Node* Float64Add(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64Add(), a, b);
- }
- Node* Float64Sub(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64Sub(), a, b);
- }
- Node* Float64Mul(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64Mul(), a, b);
- }
- Node* Float64Div(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64Div(), a, b);
- }
- Node* Float64Mod(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64Mod(), a, b);
- }
- Node* Float64Equal(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64Equal(), a, b);
- }
- Node* Float64NotEqual(Node* a, Node* b) {
- return WordBinaryNot(Float64Equal(a, b));
- }
- Node* Float64LessThan(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64LessThan(), a, b);
- }
- Node* Float64LessThanOrEqual(Node* a, Node* b) {
- return NEW_NODE_2(MACHINE()->Float64LessThanOrEqual(), a, b);
- }
- Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
- Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
- return Float64LessThanOrEqual(b, a);
- }
-
- // Conversions.
- Node* ConvertInt32ToInt64(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a);
- }
- Node* ConvertInt64ToInt32(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a);
- }
- Node* ChangeInt32ToFloat64(Node* a) {
- return NEW_NODE_1(MACHINE()->ChangeInt32ToFloat64(), a);
- }
- Node* ChangeUint32ToFloat64(Node* a) {
- return NEW_NODE_1(MACHINE()->ChangeUint32ToFloat64(), a);
- }
- Node* ChangeFloat64ToInt32(Node* a) {
- return NEW_NODE_1(MACHINE()->ChangeFloat64ToInt32(), a);
- }
- Node* ChangeFloat64ToUint32(Node* a) {
- return NEW_NODE_1(MACHINE()->ChangeFloat64ToUint32(), a);
- }
-
-#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C
- // Call to C.
- Node* CallC(Node* function_address, MachineType return_type,
- MachineType* arg_types, Node** args, int n_args) {
- CallDescriptor* descriptor = Linkage::GetSimplifiedCDescriptor(
- ZONE(), n_args, return_type, arg_types);
- Node** passed_args =
- static_cast<Node**>(alloca((n_args + 1) * sizeof(args[0])));
- passed_args[0] = function_address;
- for (int i = 0; i < n_args; ++i) {
- passed_args[i + 1] = args[i];
- }
- return NEW_NODE_2(COMMON()->Call(descriptor), n_args + 1, passed_args);
- }
-#endif
-};
-
-#undef NEW_NODE_0
-#undef NEW_NODE_1
-#undef NEW_NODE_2
-#undef NEW_NODE_3
-#undef MACHINE
-#undef COMMON
-#undef ZONE
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_MACHINE_NODE_FACTORY_H_
diff --git a/deps/v8/src/compiler/machine-operator-reducer-unittest.cc b/deps/v8/src/compiler/machine-operator-reducer-unittest.cc
new file mode 100644
index 0000000000..5a76342f77
--- /dev/null
+++ b/deps/v8/src/compiler/machine-operator-reducer-unittest.cc
@@ -0,0 +1,659 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MachineOperatorReducerTest : public GraphTest {
+ public:
+ explicit MachineOperatorReducerTest(int num_parameters = 2)
+ : GraphTest(num_parameters) {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ Typer typer(zone());
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine_);
+ MachineOperatorReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+};
+
+
+template <typename T>
+class MachineOperatorReducerTestWithParam
+ : public MachineOperatorReducerTest,
+ public ::testing::WithParamInterface<T> {
+ public:
+ explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
+ : MachineOperatorReducerTest(num_parameters) {}
+ virtual ~MachineOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const float kFloat32Values[] = {
+ -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+ -1.22813e+35f, -1.20555e+35f, -1.34584e+34f,
+ -1.0079e+32f, -6.49364e+26f, -3.06077e+25f,
+ -1.46821e+25f, -1.17658e+23f, -1.9617e+22f,
+ -2.7357e+20f, -1.48708e+13f, -1.89633e+12f,
+ -4.66622e+11f, -2.22581e+11f, -1.45381e+10f,
+ -1.3956e+09f, -1.32951e+09f, -1.30721e+09f,
+ -1.19756e+09f, -9.26822e+08f, -6.35647e+08f,
+ -4.00037e+08f, -1.81227e+08f, -5.09256e+07f,
+ -964300.0f, -192446.0f, -28455.0f,
+ -27194.0f, -26401.0f, -20575.0f,
+ -17069.0f, -9167.0f, -960.178f,
+ -113.0f, -62.0f, -15.0f,
+ -7.0f, -0.0256635f, -4.60374e-07f,
+ -3.63759e-10f, -4.30175e-14f, -5.27385e-15f,
+ -1.48084e-15f, -1.05755e-19f, -3.2995e-21f,
+ -1.67354e-23f, -1.11885e-23f, -1.78506e-30f,
+ -5.07594e-31f, -3.65799e-31f, -1.43718e-34f,
+ -1.27126e-38f, -0.0f, 0.0f,
+ 1.17549e-38f, 1.56657e-37f, 4.08512e-29f,
+ 3.31357e-28f, 6.25073e-22f, 4.1723e-13f,
+ 1.44343e-09f, 5.27004e-08f, 9.48298e-08f,
+ 5.57888e-07f, 4.89988e-05f, 0.244326f,
+ 12.4895f, 19.0f, 47.0f,
+ 106.0f, 538.324f, 564.536f,
+ 819.124f, 7048.0f, 12611.0f,
+ 19878.0f, 20309.0f, 797056.0f,
+ 1.77219e+09f, 1.51116e+11f, 4.18193e+13f,
+ 3.59167e+16f, 3.38211e+19f, 2.67488e+20f,
+ 1.78831e+21f, 9.20914e+21f, 8.35654e+23f,
+ 1.4495e+24f, 5.94015e+25f, 4.43608e+30f,
+ 2.44502e+33f, 2.61152e+33f, 1.38178e+37f,
+ 1.71306e+37f, 3.31899e+38f, 3.40282e+38f,
+ std::numeric_limits<float>::infinity()};
+
+
+static const double kFloat64Values[] = {
+ -V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
+ -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
+ -1.67813e+72, -2.3382e+55, -3.179e+30, -1.441e+09, -1.0647e+09,
+ -7.99361e+08, -5.77375e+08, -2.20984e+08, -32757, -13171,
+ -9970, -3984, -107, -105, -92,
+ -77, -61, -0.000208163, -1.86685e-06, -1.17296e-10,
+ -9.26358e-11, -5.08004e-60, -1.74753e-65, -1.06561e-71, -5.67879e-79,
+ -5.78459e-130, -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
+ -4.40497e-267, -2.19666e-273, -4.9998e-276, -5.59821e-278, -2.03855e-282,
+ -5.99335e-283, -7.17554e-284, -3.11744e-309, -0.0, 0.0,
+ 2.22507e-308, 1.30127e-270, 7.62898e-260, 4.00313e-249, 3.16829e-233,
+ 1.85244e-228, 2.03544e-129, 1.35126e-110, 1.01182e-106, 5.26333e-94,
+ 1.35292e-90, 2.85394e-83, 1.78323e-77, 5.4967e-57, 1.03207e-25,
+ 4.57401e-25, 1.58738e-05, 2, 125, 2310,
+ 9636, 14802, 17168, 28945, 29305,
+ 4.81336e+07, 1.41207e+08, 4.65962e+08, 1.40499e+09, 2.12648e+09,
+ 8.80006e+30, 1.4446e+45, 1.12164e+54, 2.48188e+89, 6.71121e+102,
+ 3.074e+112, 4.9699e+152, 5.58383e+166, 4.30654e+172, 7.08824e+185,
+ 9.6586e+214, 2.028e+223, 6.63277e+243, 1.56192e+261, 1.23202e+269,
+ 5.72883e+289, 8.5798e+290, 1.40256e+294, 1.79769e+308, V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+ -2147483647 - 1, -1914954528, -1698749618, -1578693386, -1577976073,
+ -1573998034, -1529085059, -1499540537, -1299205097, -1090814845,
+ -938186388, -806828902, -750927650, -520676892, -513661538,
+ -453036354, -433622833, -282638793, -28375, -27788,
+ -22770, -18806, -14173, -11956, -11200,
+ -10212, -8160, -3751, -2758, -1522,
+ -121, -120, -118, -117, -106,
+ -84, -80, -74, -59, -52,
+ -48, -39, -35, -17, -11,
+ -10, -9, -7, -5, 0,
+ 9, 12, 17, 23, 29,
+ 31, 33, 35, 40, 47,
+ 55, 56, 62, 64, 67,
+ 68, 69, 74, 79, 84,
+ 89, 90, 97, 104, 118,
+ 124, 126, 127, 7278, 17787,
+ 24136, 24202, 25570, 26680, 30242,
+ 32399, 420886487, 642166225, 821912648, 822577803,
+ 851385718, 1212241078, 1411419304, 1589626102, 1596437184,
+ 1876245816, 1954730266, 2008792749, 2045320228, 2147483647};
+
+
+static const int64_t kInt64Values[] = {
+ V8_INT64_C(-9223372036854775807) - 1, V8_INT64_C(-8974392461363618006),
+ V8_INT64_C(-8874367046689588135), V8_INT64_C(-8269197512118230839),
+ V8_INT64_C(-8146091527100606733), V8_INT64_C(-7550917981466150848),
+ V8_INT64_C(-7216590251577894337), V8_INT64_C(-6464086891160048440),
+ V8_INT64_C(-6365616494908257190), V8_INT64_C(-6305630541365849726),
+ V8_INT64_C(-5982222642272245453), V8_INT64_C(-5510103099058504169),
+ V8_INT64_C(-5496838675802432701), V8_INT64_C(-4047626578868642657),
+ V8_INT64_C(-4033755046900164544), V8_INT64_C(-3554299241457877041),
+ V8_INT64_C(-2482258764588614470), V8_INT64_C(-1688515425526875335),
+ V8_INT64_C(-924784137176548532), V8_INT64_C(-725316567157391307),
+ V8_INT64_C(-439022654781092241), V8_INT64_C(-105545757668917080),
+ V8_INT64_C(-2088319373), V8_INT64_C(-2073699916),
+ V8_INT64_C(-1844949911), V8_INT64_C(-1831090548),
+ V8_INT64_C(-1756711933), V8_INT64_C(-1559409497),
+ V8_INT64_C(-1281179700), V8_INT64_C(-1211513985),
+ V8_INT64_C(-1182371520), V8_INT64_C(-785934753),
+ V8_INT64_C(-767480697), V8_INT64_C(-705745662),
+ V8_INT64_C(-514362436), V8_INT64_C(-459916580),
+ V8_INT64_C(-312328082), V8_INT64_C(-302949707),
+ V8_INT64_C(-285499304), V8_INT64_C(-125701262),
+ V8_INT64_C(-95139843), V8_INT64_C(-32768),
+ V8_INT64_C(-27542), V8_INT64_C(-23600),
+ V8_INT64_C(-18582), V8_INT64_C(-17770),
+ V8_INT64_C(-9086), V8_INT64_C(-9010),
+ V8_INT64_C(-8244), V8_INT64_C(-2890),
+ V8_INT64_C(-103), V8_INT64_C(-34),
+ V8_INT64_C(-27), V8_INT64_C(-25),
+ V8_INT64_C(-9), V8_INT64_C(-7),
+ V8_INT64_C(0), V8_INT64_C(2),
+ V8_INT64_C(38), V8_INT64_C(58),
+ V8_INT64_C(65), V8_INT64_C(93),
+ V8_INT64_C(111), V8_INT64_C(1003),
+ V8_INT64_C(1267), V8_INT64_C(12797),
+ V8_INT64_C(23122), V8_INT64_C(28200),
+ V8_INT64_C(30888), V8_INT64_C(42648848),
+ V8_INT64_C(116836693), V8_INT64_C(263003643),
+ V8_INT64_C(571039860), V8_INT64_C(1079398689),
+ V8_INT64_C(1145196402), V8_INT64_C(1184846321),
+ V8_INT64_C(1758281648), V8_INT64_C(1859991374),
+ V8_INT64_C(1960251588), V8_INT64_C(2042443199),
+ V8_INT64_C(296220586027987448), V8_INT64_C(1015494173071134726),
+ V8_INT64_C(1151237951914455318), V8_INT64_C(1331941174616854174),
+ V8_INT64_C(2022020418667972654), V8_INT64_C(2450251424374977035),
+ V8_INT64_C(3668393562685561486), V8_INT64_C(4858229301215502171),
+ V8_INT64_C(4919426235170669383), V8_INT64_C(5034286595330341762),
+ V8_INT64_C(5055797915536941182), V8_INT64_C(6072389716149252074),
+ V8_INT64_C(6185309910199801210), V8_INT64_C(6297328311011094138),
+ V8_INT64_C(6932372858072165827), V8_INT64_C(8483640924987737210),
+ V8_INT64_C(8663764179455849203), V8_INT64_C(8877197042645298254),
+ V8_INT64_C(8901543506779157333), V8_INT64_C(9223372036854775807)};
+
+
+static const uint32_t kUint32Values[] = {
+ 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+ const Operator* (MachineOperatorBuilder::*constructor)();
+ const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+ return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+ {&MachineOperatorBuilder::ChangeInt32ToFloat64, "ChangeInt32ToFloat64"},
+ {&MachineOperatorBuilder::ChangeUint32ToFloat64, "ChangeUint32ToFloat64"},
+ {&MachineOperatorBuilder::ChangeFloat64ToInt32, "ChangeFloat64ToInt32"},
+ {&MachineOperatorBuilder::ChangeFloat64ToUint32, "ChangeFloat64ToUint32"},
+ {&MachineOperatorBuilder::ChangeInt32ToInt64, "ChangeInt32ToInt64"},
+ {&MachineOperatorBuilder::ChangeUint32ToUint64, "ChangeUint32ToUint64"},
+ {&MachineOperatorBuilder::TruncateFloat64ToInt32, "TruncateFloat64ToInt32"},
+ {&MachineOperatorBuilder::TruncateInt64ToInt32, "TruncateInt64ToInt32"}};
+
+} // namespace
+
+
+typedef MachineOperatorReducerTestWithParam<UnaryOperator>
+ MachineUnaryOperatorReducerTest;
+
+
+TEST_P(MachineUnaryOperatorReducerTest, Parameter) {
+ const UnaryOperator unop = GetParam();
+ Reduction reduction =
+ Reduce(graph()->NewNode((machine()->*unop.constructor)(), Parameter(0)));
+ EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
+ MachineUnaryOperatorReducerTest,
+ ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToFloat32WithConstant) {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeFloat64ToInt32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt32(), Float64Constant(FastI2D(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(x));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToUint32
+
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeFloat64ToUint32WithChangeUint32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToUint32(),
+ graph()->NewNode(machine()->ChangeUint32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToUint32WithConstant) {
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToUint32(), Float64Constant(FastUI2D(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(bit_cast<int32_t>(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToFloat64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), Int32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastI2D(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToInt64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), Int32Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(machine()->ChangeUint32ToFloat64(),
+ Int32Constant(bit_cast<int32_t>(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastUI2D(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToUint64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) {
+ TRACED_FOREACH(uint32_t, x, kUint32Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ Int32Constant(bit_cast<int32_t>(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest,
+ TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToFloat32(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+ TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateInt64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithChangeInt32ToInt64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateInt64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
+ TRACED_FOREACH(int64_t, x, kInt64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->TruncateInt64ToInt32(), Int64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(
+ static_cast<uint32_t>(bit_cast<uint64_t>(x)))));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Ror
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
+ Node* value = Parameter(0);
+ Node* shift = Parameter(1);
+ Node* shl = graph()->NewNode(machine()->Word32Shl(), value, shift);
+ Node* shr = graph()->NewNode(
+ machine()->Word32Shr(), value,
+ graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift));
+
+ // (x << y) | (x >> (32 - y)) => x ror y
+ Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+ Reduction reduction1 = Reduce(node1);
+ EXPECT_TRUE(reduction1.Changed());
+ EXPECT_EQ(reduction1.replacement(), node1);
+ EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, shift));
+
+ // (x >> (32 - y)) | (x << y) => x ror y
+ Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+ Reduction reduction2 = Reduce(node2);
+ EXPECT_TRUE(reduction2.Changed());
+ EXPECT_EQ(reduction2.replacement(), node2);
+ EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, shift));
+}
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
+ Node* value = Parameter(0);
+ TRACED_FORRANGE(int32_t, k, 0, 31) {
+ Node* shl =
+ graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(k));
+ Node* shr =
+ graph()->NewNode(machine()->Word32Shr(), value, Int32Constant(32 - k));
+
+ // (x << K) | (x >> ((32 - K) - y)) => x ror K
+ Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+ Reduction reduction1 = Reduce(node1);
+ EXPECT_TRUE(reduction1.Changed());
+ EXPECT_EQ(reduction1.replacement(), node1);
+ EXPECT_THAT(reduction1.replacement(),
+ IsWord32Ror(value, IsInt32Constant(k)));
+
+ // (x >> (32 - K)) | (x << K) => x ror K
+ Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+ Reduction reduction2 = Reduce(node2);
+ EXPECT_TRUE(reduction2.Changed());
+ EXPECT_EQ(reduction2.replacement(), node2);
+ EXPECT_THAT(reduction2.replacement(),
+ IsWord32Ror(value, IsInt32Constant(k)));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithZeroShift) {
+ Node* value = Parameter(0);
+ Node* node =
+ graph()->NewNode(machine()->Word32Ror(), value, Int32Constant(0));
+ Reduction reduction = Reduce(node);
+ EXPECT_TRUE(reduction.Changed());
+ EXPECT_EQ(reduction.replacement(), value);
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
+ TRACED_FOREACH(int32_t, x, kUint32Values) {
+ TRACED_FORRANGE(int32_t, y, 0, 31) {
+ Node* node = graph()->NewNode(machine()->Word32Ror(), Int32Constant(x),
+ Int32Constant(y));
+ Reduction reduction = Reduce(node);
+ EXPECT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(base::bits::RotateRight32(x, y)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Shl
+
+
+TEST_F(MachineOperatorReducerTest, Word32ShlWithZeroShift) {
+ Node* p0 = Parameter(0);
+ Node* node = graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(0));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Sar) {
+ Node* p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, x, 1, 31) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Sar(), p0, Int32Constant(x)),
+ Int32Constant(x));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ int32_t m = bit_cast<int32_t>(~((1U << x) - 1U));
+ EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
+ Node* p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, x, 1, 31) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Shr(), p0, Int32Constant(x)),
+ Int32Constant(x));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ int32_t m = bit_cast<int32_t>(~((1U << x) - 1U));
+ EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32AddWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+ Node* p0 = Parameter(0);
+ {
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+ Int32Constant(0), p0);
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+ {
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
+ Int32Constant(0));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ TRACED_FOREACH(int32_t, y, kInt32Values) {
+ int32_t z;
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+ Int32Constant(x), Int32Constant(y));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32SubWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+ Node* p0 = Parameter(0);
+ Node* add =
+ graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ TRACED_FOREACH(int32_t, y, kInt32Values) {
+ int32_t z;
+ Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
+ Int32Constant(x), Int32Constant(y));
+
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 4a4057646d..fa31d057e1 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -4,51 +4,48 @@
#include "src/compiler/machine-operator-reducer.h"
-#include "src/compiler/common-node-cache.h"
+#include "src/base/bits.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
namespace compiler {
-MachineOperatorReducer::MachineOperatorReducer(Graph* graph)
- : graph_(graph),
- cache_(new (graph->zone()) CommonNodeCache(graph->zone())),
- common_(graph->zone()),
- machine_(graph->zone()) {}
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
+ : jsgraph_(jsgraph) {}
-MachineOperatorReducer::MachineOperatorReducer(Graph* graph,
- CommonNodeCache* cache)
- : graph_(graph),
- cache_(cache),
- common_(graph->zone()),
- machine_(graph->zone()) {}
+MachineOperatorReducer::~MachineOperatorReducer() {}
-Node* MachineOperatorReducer::Int32Constant(int32_t value) {
- Node** loc = cache_->FindInt32Constant(value);
- if (*loc == NULL) {
- *loc = graph_->NewNode(common_.Int32Constant(value));
- }
- return *loc;
+Node* MachineOperatorReducer::Float32Constant(volatile float value) {
+ return graph()->NewNode(common()->Float32Constant(value));
}
Node* MachineOperatorReducer::Float64Constant(volatile double value) {
- Node** loc = cache_->FindFloat64Constant(value);
- if (*loc == NULL) {
- *loc = graph_->NewNode(common_.Float64Constant(value));
- }
- return *loc;
+ return jsgraph()->Float64Constant(value);
+}
+
+
+Node* MachineOperatorReducer::Int32Constant(int32_t value) {
+ return jsgraph()->Int32Constant(value);
+}
+
+
+Node* MachineOperatorReducer::Int64Constant(int64_t value) {
+ return graph()->NewNode(common()->Int64Constant(value));
}
// Perform constant folding and strength reduction on machine operators.
Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kProjection:
+ return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
case IrOpcode::kWord32And: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
@@ -67,6 +64,56 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceInt32(m.left().Value() | m.right().Value());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
+ if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
+ Int32BinopMatcher mleft(m.left().node());
+ Int32BinopMatcher mright(m.right().node());
+ if (mleft.left().node() == mright.left().node()) {
+ // (x << y) | (x >> (32 - y)) => x ror y
+ if (mright.right().IsInt32Sub()) {
+ Int32BinopMatcher mrightright(mright.right().node());
+ if (mrightright.left().Is(32) &&
+ mrightright.right().node() == mleft.right().node()) {
+ node->set_op(machine()->Word32Ror());
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1, mleft.right().node());
+ return Changed(node);
+ }
+ }
+ // (x << K) | (x >> (32 - K)) => x ror K
+ if (mleft.right().IsInRange(0, 31) &&
+ mright.right().Is(32 - mleft.right().Value())) {
+ node->set_op(machine()->Word32Ror());
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1, mleft.right().node());
+ return Changed(node);
+ }
+ }
+ }
+ if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
+ // (x >> (32 - y)) | (x << y) => x ror y
+ Int32BinopMatcher mleft(m.left().node());
+ Int32BinopMatcher mright(m.right().node());
+ if (mleft.left().node() == mright.left().node()) {
+ if (mleft.right().IsInt32Sub()) {
+ Int32BinopMatcher mleftright(mleft.right().node());
+ if (mleftright.left().Is(32) &&
+ mleftright.right().node() == mright.right().node()) {
+ node->set_op(machine()->Word32Ror());
+ node->ReplaceInput(0, mright.left().node());
+ node->ReplaceInput(1, mright.right().node());
+ return Changed(node);
+ }
+ }
+ // (x >> (32 - K)) | (x << K) => x ror K
+ if (mright.right().IsInRange(0, 31) &&
+ mleft.right().Is(32 - mright.right().Value())) {
+ node->set_op(machine()->Word32Ror());
+ node->ReplaceInput(0, mright.left().node());
+ node->ReplaceInput(1, mright.right().node());
+ return Changed(node);
+ }
+ }
+ }
break;
}
case IrOpcode::kWord32Xor: {
@@ -84,6 +131,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // K << K => K
return ReplaceInt32(m.left().Value() << m.right().Value());
}
+ if (m.right().IsInRange(1, 31)) {
+ // (x >>> K) << K => x & ~(2^K - 1)
+ // (x >> K) << K => x & ~(2^K - 1)
+ if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(m.right().Value())) {
+ node->set_op(machine()->Word32And());
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(
+ 1, Uint32Constant(~((1U << m.right().Value()) - 1U)));
+ return Changed(node);
+ }
+ }
+ }
break;
}
case IrOpcode::kWord32Shr: {
@@ -102,6 +163,15 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kWord32Ror: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
+ if (m.IsFoldable()) { // K ror K => K
+ return ReplaceInt32(
+ base::bits::RotateRight32(m.left().Value(), m.right().Value()));
+ }
+ break;
+ }
case IrOpcode::kWord32Equal: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K == K => K
@@ -144,13 +214,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceInt32(m.left().Value() * m.right().Value());
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
- graph_->ChangeOperator(node, machine_.Int32Sub());
+ node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
return Changed(node);
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
- graph_->ChangeOperator(node, machine_.Word32Shl());
+ node->set_op(machine()->Word32Shl());
node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
return Changed(node);
}
@@ -168,7 +238,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceInt32(m.left().Value() / m.right().Value());
}
if (m.right().Is(-1)) { // x / -1 => 0 - x
- graph_->ChangeOperator(node, machine_.Int32Sub());
+ node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
return Changed(node);
@@ -185,7 +255,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceInt32(m.left().Value() / m.right().Value());
}
if (m.right().IsPowerOf2()) { // x / 2^n => x >> n
- graph_->ChangeOperator(node, machine_.Word32Shr());
+ node->set_op(machine()->Word32Shr());
node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
return Changed(node);
}
@@ -214,7 +284,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceInt32(m.left().Value() % m.right().Value());
}
if (m.right().IsPowerOf2()) { // x % 2^n => x & 2^n-1
- graph_->ChangeOperator(node, machine_.Word32And());
+ node->set_op(machine()->Word32And());
node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
return Changed(node);
}
@@ -282,6 +352,9 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
+ if (m.right().IsNaN()) { // x + NaN => NaN
+ return Replace(m.right().node());
+ }
if (m.IsFoldable()) { // K + K => K
return ReplaceFloat64(m.left().Value() + m.right().Value());
}
@@ -289,6 +362,15 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Sub: {
Float64BinopMatcher m(node);
+ if (m.right().Is(0) && (Double(m.right().Value()).Sign() > 0)) {
+ return Replace(m.left().node()); // x - 0 => x
+ }
+ if (m.right().IsNaN()) { // x - NaN => NaN
+ return Replace(m.right().node());
+ }
+ if (m.left().IsNaN()) { // NaN - x => NaN
+ return Replace(m.left().node());
+ }
if (m.IsFoldable()) { // K - K => K
return ReplaceFloat64(m.left().Value() - m.right().Value());
}
@@ -321,6 +403,9 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Mod: {
Float64BinopMatcher m(node);
+ if (m.right().Is(0)) { // x % 0 => NaN
+ return ReplaceFloat64(base::OS::nan_value());
+ }
if (m.right().IsNaN()) { // x % NaN => NaN
return Replace(m.right().node());
}
@@ -332,12 +417,117 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
- // TODO(turbofan): strength-reduce and fold floating point operations.
+ case IrOpcode::kChangeFloat32ToFloat64: {
+ Float32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(m.Value());
+ break;
+ }
+ case IrOpcode::kChangeFloat64ToInt32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value()));
+ if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeFloat64ToUint32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
+ if (m.IsChangeUint32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeInt32ToFloat64: {
+ Int32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(FastI2D(m.Value()));
+ break;
+ }
+ case IrOpcode::kChangeInt32ToInt64: {
+ Int32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt64(m.Value());
+ break;
+ }
+ case IrOpcode::kChangeUint32ToFloat64: {
+ Uint32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(FastUI2D(m.Value()));
+ break;
+ }
+ case IrOpcode::kChangeUint32ToUint64: {
+ Uint32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
+ break;
+ }
+ case IrOpcode::kTruncateFloat64ToInt32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kTruncateInt64ToInt32: {
+ Int64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+ if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kTruncateFloat64ToFloat32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
+ if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow: {
+ DCHECK(index == 0 || index == 1);
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) {
+ int32_t val;
+ bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
+ m.right().Value(), &val);
+ return ReplaceInt32((index == 0) ? val : ovf);
+ }
+ if (m.right().Is(0)) {
+ return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+ }
+ break;
+ }
+ case IrOpcode::kInt32SubWithOverflow: {
+ DCHECK(index == 0 || index == 1);
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) {
+ int32_t val;
+ bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
+ m.right().Value(), &val);
+ return ReplaceInt32((index == 0) ? val : ovf);
+ }
+ if (m.right().Is(0)) {
+ return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+ }
+ break;
+ }
default:
break;
}
return NoChange();
}
+
+
+CommonOperatorBuilder* MachineOperatorReducer::common() const {
+ return jsgraph()->common();
}
+
+
+MachineOperatorBuilder* MachineOperatorReducer::machine() const {
+ return jsgraph()->machine();
}
-} // namespace v8::internal::compiler
+
+
+Graph* MachineOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 46d2931e99..e40ad65cdc 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
-#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/machine-operator.h"
@@ -14,39 +13,54 @@ namespace internal {
namespace compiler {
// Forward declarations.
-class CommonNodeCache;
+class CommonOperatorBuilder;
+class JSGraph;
+
// Performs constant folding and strength reduction on nodes that have
// machine operators.
-class MachineOperatorReducer : public Reducer {
+class MachineOperatorReducer FINAL : public Reducer {
public:
- explicit MachineOperatorReducer(Graph* graph);
-
- MachineOperatorReducer(Graph* graph, CommonNodeCache* cache);
+ explicit MachineOperatorReducer(JSGraph* jsgraph);
+ ~MachineOperatorReducer();
- virtual Reduction Reduce(Node* node);
+ virtual Reduction Reduce(Node* node) OVERRIDE;
private:
- Graph* graph_;
- CommonNodeCache* cache_;
- CommonOperatorBuilder common_;
- MachineOperatorBuilder machine_;
-
- Node* Int32Constant(int32_t value);
+ Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* Uint32Constant(uint32_t value) {
+ return Int32Constant(bit_cast<uint32_t>(value));
+ }
Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
-
- Reduction ReplaceInt32(int32_t value) {
- return Replace(Int32Constant(value));
+ Reduction ReplaceFloat32(volatile float value) {
+ return Replace(Float32Constant(value));
}
-
Reduction ReplaceFloat64(volatile double value) {
return Replace(Float64Constant(value));
}
+ Reduction ReplaceInt32(int32_t value) {
+ return Replace(Int32Constant(value));
+ }
+ Reduction ReplaceInt64(int64_t value) {
+ return Replace(Int64Constant(value));
+ }
+
+ Reduction ReduceProjection(size_t index, Node* node);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+
+ JSGraph* jsgraph_;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
diff --git a/deps/v8/src/compiler/machine-operator-unittest.cc b/deps/v8/src/compiler/machine-operator-unittest.cc
new file mode 100644
index 0000000000..cb93ce76c6
--- /dev/null
+++ b/deps/v8/src/compiler/machine-operator-unittest.cc
@@ -0,0 +1,325 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#if GTEST_HAS_COMBINE
+
+// TODO(bmeurer): Find a new home for these.
+inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
+ OStringStream ost;
+ ost << type;
+ return os << ost.c_str();
+}
+inline std::ostream& operator<<(std::ostream& os,
+ const WriteBarrierKind& write_barrier_kind) {
+ OStringStream ost;
+ ost << write_barrier_kind;
+ return os << ost.c_str();
+}
+
+
+template <typename T>
+class MachineOperatorTestWithParam
+ : public ::testing::TestWithParam< ::testing::tuple<MachineType, T> > {
+ protected:
+ MachineType type() const { return ::testing::get<0>(B::GetParam()); }
+ const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+ typedef ::testing::TestWithParam< ::testing::tuple<MachineType, T> > B;
+};
+
+
+namespace {
+
+const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
+
+
+const MachineType kMachineTypes[] = {
+ kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
+ kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
+ kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
+ kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Load operator.
+
+
+typedef MachineOperatorTestWithParam<LoadRepresentation>
+ MachineLoadOperatorTest;
+
+
+TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
+ MachineOperatorBuilder machine1(type());
+ MachineOperatorBuilder machine2(type());
+ EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
+}
+
+
+TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
+ MachineOperatorBuilder machine(type());
+ const Operator* op = machine.Load(GetParam());
+
+ EXPECT_EQ(2, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(3, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
+}
+
+
+TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(GetParam(),
+ OpParameter<LoadRepresentation>(machine.Load(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
+ ::testing::Combine(::testing::ValuesIn(kMachineReps),
+ ::testing::ValuesIn(kMachineTypes)));
+
+
+// -----------------------------------------------------------------------------
+// Store operator.
+
+
+class MachineStoreOperatorTest
+ : public MachineOperatorTestWithParam<
+ ::testing::tuple<MachineType, WriteBarrierKind> > {
+ protected:
+ StoreRepresentation GetParam() const {
+ return StoreRepresentation(
+ ::testing::get<0>(MachineOperatorTestWithParam<
+ ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
+ ::testing::get<1>(MachineOperatorTestWithParam<
+ ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
+ }
+};
+
+
+TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
+ MachineOperatorBuilder machine1(type());
+ MachineOperatorBuilder machine2(type());
+ EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
+}
+
+
+TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
+ MachineOperatorBuilder machine(type());
+ const Operator* op = machine.Store(GetParam());
+
+ EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
+}
+
+
+TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ EXPECT_EQ(GetParam(),
+ OpParameter<StoreRepresentation>(machine.Store(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+ MachineOperatorTest, MachineStoreOperatorTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(kMachineReps),
+ ::testing::Combine(::testing::ValuesIn(kMachineTypes),
+ ::testing::Values(kNoWriteBarrier,
+ kFullWriteBarrier))));
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+ const Operator* (MachineOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ int value_input_count;
+ int value_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+ return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, input_count, output_count) \
+ { \
+ &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
+ output_count \
+ }
+ PURE(Word32And, 2, 1), PURE(Word32Or, 2, 1),
+ PURE(Word32Xor, 2, 1), PURE(Word32Shl, 2, 1),
+ PURE(Word32Shr, 2, 1), PURE(Word32Sar, 2, 1),
+ PURE(Word32Ror, 2, 1), PURE(Word32Equal, 2, 1),
+ PURE(Word64And, 2, 1), PURE(Word64Or, 2, 1),
+ PURE(Word64Xor, 2, 1), PURE(Word64Shl, 2, 1),
+ PURE(Word64Shr, 2, 1), PURE(Word64Sar, 2, 1),
+ PURE(Word64Ror, 2, 1), PURE(Word64Equal, 2, 1),
+ PURE(Int32Add, 2, 1), PURE(Int32AddWithOverflow, 2, 2),
+ PURE(Int32Sub, 2, 1), PURE(Int32SubWithOverflow, 2, 2),
+ PURE(Int32Mul, 2, 1), PURE(Int32Div, 2, 1),
+ PURE(Int32UDiv, 2, 1), PURE(Int32Mod, 2, 1),
+ PURE(Int32UMod, 2, 1), PURE(Int32LessThan, 2, 1),
+ PURE(Int32LessThanOrEqual, 2, 1), PURE(Uint32LessThan, 2, 1),
+ PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1),
+ PURE(Int64Sub, 2, 1), PURE(Int64Mul, 2, 1),
+ PURE(Int64Div, 2, 1), PURE(Int64UDiv, 2, 1),
+ PURE(Int64Mod, 2, 1), PURE(Int64UMod, 2, 1),
+ PURE(Int64LessThan, 2, 1), PURE(Int64LessThanOrEqual, 2, 1),
+ PURE(ChangeFloat32ToFloat64, 1, 1), PURE(ChangeFloat64ToInt32, 1, 1),
+ PURE(ChangeFloat64ToUint32, 1, 1), PURE(ChangeInt32ToInt64, 1, 1),
+ PURE(ChangeUint32ToFloat64, 1, 1), PURE(ChangeUint32ToUint64, 1, 1),
+ PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
+ PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1),
+ PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1),
+ PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1),
+ PURE(Float64Sqrt, 1, 1), PURE(Float64Equal, 2, 1),
+ PURE(Float64LessThan, 2, 1), PURE(Float64LessThanOrEqual, 2, 1)
+#undef PURE
+};
+
+
+typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
+
+} // namespace
+
+
+TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
+ const PureOperator& pop = GetParam();
+ MachineOperatorBuilder machine1(type());
+ MachineOperatorBuilder machine2(type());
+ EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
+}
+
+
+TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
+ MachineOperatorBuilder machine(type());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (machine.*pop.constructor)();
+
+ EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(pop.value_output_count,
+ OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachinePureOperatorTest, MarkedAsPure) {
+ MachineOperatorBuilder machine(type());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (machine.*pop.constructor)();
+ EXPECT_TRUE(op->HasProperty(Operator::kPure));
+}
+
+
+TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
+ MachineOperatorBuilder machine(type());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (machine.*pop.constructor)();
+ EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+ MachineOperatorTest, MachinePureOperatorTest,
+ ::testing::Combine(::testing::ValuesIn(kMachineReps),
+ ::testing::ValuesIn(kPureOperators)));
+
+#endif // GTEST_HAS_COMBINE
+
+
+// -----------------------------------------------------------------------------
+// Pseudo operators.
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
+ MachineOperatorBuilder machine(kRepWord32);
+ EXPECT_EQ(machine.Word32And(), machine.WordAnd());
+ EXPECT_EQ(machine.Word32Or(), machine.WordOr());
+ EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
+ EXPECT_EQ(machine.Word32Shl(), machine.WordShl());
+ EXPECT_EQ(machine.Word32Shr(), machine.WordShr());
+ EXPECT_EQ(machine.Word32Sar(), machine.WordSar());
+ EXPECT_EQ(machine.Word32Ror(), machine.WordRor());
+ EXPECT_EQ(machine.Word32Equal(), machine.WordEqual());
+ EXPECT_EQ(machine.Int32Add(), machine.IntAdd());
+ EXPECT_EQ(machine.Int32Sub(), machine.IntSub());
+ EXPECT_EQ(machine.Int32Mul(), machine.IntMul());
+ EXPECT_EQ(machine.Int32Div(), machine.IntDiv());
+ EXPECT_EQ(machine.Int32UDiv(), machine.IntUDiv());
+ EXPECT_EQ(machine.Int32Mod(), machine.IntMod());
+ EXPECT_EQ(machine.Int32UMod(), machine.IntUMod());
+ EXPECT_EQ(machine.Int32LessThan(), machine.IntLessThan());
+ EXPECT_EQ(machine.Int32LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
+ MachineOperatorBuilder machine(kRepWord64);
+ EXPECT_EQ(machine.Word64And(), machine.WordAnd());
+ EXPECT_EQ(machine.Word64Or(), machine.WordOr());
+ EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
+ EXPECT_EQ(machine.Word64Shl(), machine.WordShl());
+ EXPECT_EQ(machine.Word64Shr(), machine.WordShr());
+ EXPECT_EQ(machine.Word64Sar(), machine.WordSar());
+ EXPECT_EQ(machine.Word64Ror(), machine.WordRor());
+ EXPECT_EQ(machine.Word64Equal(), machine.WordEqual());
+ EXPECT_EQ(machine.Int64Add(), machine.IntAdd());
+ EXPECT_EQ(machine.Int64Sub(), machine.IntSub());
+ EXPECT_EQ(machine.Int64Mul(), machine.IntMul());
+ EXPECT_EQ(machine.Int64Div(), machine.IntDiv());
+ EXPECT_EQ(machine.Int64UDiv(), machine.IntUDiv());
+ EXPECT_EQ(machine.Int64Mod(), machine.IntMod());
+ EXPECT_EQ(machine.Int64UMod(), machine.IntUMod());
+ EXPECT_EQ(machine.Int64LessThan(), machine.IntLessThan());
+ EXPECT_EQ(machine.Int64LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
new file mode 100644
index 0000000000..2f30bd214d
--- /dev/null
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -0,0 +1,244 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind) {
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ return os << "NoWriteBarrier";
+ case kFullWriteBarrier:
+ return os << "FullWriteBarrier";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const StoreRepresentation& rep) {
+ return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
+ << ")";
+}
+
+
+template <>
+struct StaticParameterTraits<StoreRepresentation> {
+ static OStream& PrintTo(OStream& os, const StoreRepresentation& rep) {
+ return os << rep;
+ }
+ static int HashCode(const StoreRepresentation& rep) {
+ return rep.machine_type() + rep.write_barrier_kind();
+ }
+ static bool Equals(const StoreRepresentation& rep1,
+ const StoreRepresentation& rep2) {
+ return rep1 == rep2;
+ }
+};
+
+
+template <>
+struct StaticParameterTraits<LoadRepresentation> {
+ static OStream& PrintTo(OStream& os, LoadRepresentation type) { // NOLINT
+ return os << type;
+ }
+ static int HashCode(LoadRepresentation type) { return type; }
+ static bool Equals(LoadRepresentation lhs, LoadRepresentation rhs) {
+ return lhs == rhs;
+ }
+};
+
+
+#define PURE_OP_LIST(V) \
+ V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Word32Shl, Operator::kNoProperties, 2, 1) \
+ V(Word32Shr, Operator::kNoProperties, 2, 1) \
+ V(Word32Sar, Operator::kNoProperties, 2, 1) \
+ V(Word32Ror, Operator::kNoProperties, 2, 1) \
+ V(Word32Equal, Operator::kCommutative, 2, 1) \
+ V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Word64Shl, Operator::kNoProperties, 2, 1) \
+ V(Word64Shr, Operator::kNoProperties, 2, 1) \
+ V(Word64Sar, Operator::kNoProperties, 2, 1) \
+ V(Word64Ror, Operator::kNoProperties, 2, 1) \
+ V(Word64Equal, Operator::kCommutative, 2, 1) \
+ V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
+ 2) \
+ V(Int32Sub, Operator::kNoProperties, 2, 1) \
+ V(Int32SubWithOverflow, Operator::kNoProperties, 2, 2) \
+ V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Int32Div, Operator::kNoProperties, 2, 1) \
+ V(Int32UDiv, Operator::kNoProperties, 2, 1) \
+ V(Int32Mod, Operator::kNoProperties, 2, 1) \
+ V(Int32UMod, Operator::kNoProperties, 2, 1) \
+ V(Int32LessThan, Operator::kNoProperties, 2, 1) \
+ V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(Uint32LessThan, Operator::kNoProperties, 2, 1) \
+ V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Int64Sub, Operator::kNoProperties, 2, 1) \
+ V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 1) \
+ V(Int64Div, Operator::kNoProperties, 2, 1) \
+ V(Int64UDiv, Operator::kNoProperties, 2, 1) \
+ V(Int64Mod, Operator::kNoProperties, 2, 1) \
+ V(Int64UMod, Operator::kNoProperties, 2, 1) \
+ V(Int64LessThan, Operator::kNoProperties, 2, 1) \
+ V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1) \
+ V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1) \
+ V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1) \
+ V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1) \
+ V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1) \
+ V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1) \
+ V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1) \
+ V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1) \
+ V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1) \
+ V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1) \
+ V(Float64Add, Operator::kCommutative, 2, 1) \
+ V(Float64Sub, Operator::kNoProperties, 2, 1) \
+ V(Float64Mul, Operator::kCommutative, 2, 1) \
+ V(Float64Div, Operator::kNoProperties, 2, 1) \
+ V(Float64Mod, Operator::kNoProperties, 2, 1) \
+ V(Float64Sqrt, Operator::kNoProperties, 1, 1) \
+ V(Float64Equal, Operator::kCommutative, 2, 1) \
+ V(Float64LessThan, Operator::kNoProperties, 2, 1) \
+ V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1)
+
+
+#define MACHINE_TYPE_LIST(V) \
+ V(MachFloat32) \
+ V(MachFloat64) \
+ V(MachInt8) \
+ V(MachUint8) \
+ V(MachInt16) \
+ V(MachUint16) \
+ V(MachInt32) \
+ V(MachUint32) \
+ V(MachInt64) \
+ V(MachUint64) \
+ V(MachAnyTagged) \
+ V(RepBit) \
+ V(RepWord8) \
+ V(RepWord16) \
+ V(RepWord32) \
+ V(RepWord64) \
+ V(RepFloat32) \
+ V(RepFloat64) \
+ V(RepTagged)
+
+
+struct MachineOperatorBuilderImpl {
+#define PURE(Name, properties, input_count, output_count) \
+ struct Name##Operator FINAL : public SimpleOperator { \
+ Name##Operator() \
+ : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
+ input_count, output_count, #Name) {} \
+ }; \
+ Name##Operator k##Name;
+ PURE_OP_LIST(PURE)
+#undef PURE
+
+#define LOAD(Type) \
+ struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, 2, 1, \
+ "Load", k##Type) {} \
+ }; \
+ Load##Type##Operator k##Load##Type;
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+
+#define STORE(Type) \
+ struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
+ explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, 3, 0, \
+ "Store", StoreRepresentation(k##Type, write_barrier_kind)) {} \
+ }; \
+ struct Store##Type##NoWriteBarrier##Operator FINAL \
+ : public Store##Type##Operator { \
+ Store##Type##NoWriteBarrier##Operator() \
+ : Store##Type##Operator(kNoWriteBarrier) {} \
+ }; \
+ struct Store##Type##FullWriteBarrier##Operator FINAL \
+ : public Store##Type##Operator { \
+ Store##Type##FullWriteBarrier##Operator() \
+ : Store##Type##Operator(kFullWriteBarrier) {} \
+ }; \
+ Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier; \
+ Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
+ MACHINE_TYPE_LIST(STORE)
+#undef STORE
+};
+
+
+static base::LazyInstance<MachineOperatorBuilderImpl>::type kImpl =
+ LAZY_INSTANCE_INITIALIZER;
+
+
+MachineOperatorBuilder::MachineOperatorBuilder(MachineType word)
+ : impl_(kImpl.Get()), word_(word) {
+ DCHECK(word == kRepWord32 || word == kRepWord64);
+}
+
+
+#define PURE(Name, properties, input_count, output_count) \
+ const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
+PURE_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
+ switch (rep) {
+#define LOAD(Type) \
+ case k##Type: \
+ return &impl_.k##Load##Type;
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
+ switch (rep.machine_type()) {
+#define STORE(Type) \
+ case k##Type: \
+ switch (rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return &impl_.k##Store##Type##NoWriteBarrier; \
+ case kFullWriteBarrier: \
+ return &impl_.k##Store##Type##FullWriteBarrier; \
+ } \
+ break;
+ MACHINE_TYPE_LIST(STORE)
+#undef STORE
+
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 93ccedc2c8..92c8ac420f 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -6,163 +6,182 @@
#define V8_COMPILER_MACHINE_OPERATOR_H_
#include "src/compiler/machine-type.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
-#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
-// TODO(turbofan): other write barriers are possible based on type
+// Forward declarations.
+struct MachineOperatorBuilderImpl;
+class Operator;
+
+
+// Supported write barrier modes.
enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind);
+
+
+typedef MachineType LoadRepresentation;
+
// A Store needs a MachineType and a WriteBarrierKind
// in order to emit the correct write barrier.
-struct StoreRepresentation {
- MachineType rep;
- WriteBarrierKind write_barrier_kind;
+class StoreRepresentation FINAL {
+ public:
+ StoreRepresentation(MachineType machine_type,
+ WriteBarrierKind write_barrier_kind)
+ : machine_type_(machine_type), write_barrier_kind_(write_barrier_kind) {}
+
+ MachineType machine_type() const { return machine_type_; }
+ WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }
+
+ private:
+ MachineType machine_type_;
+ WriteBarrierKind write_barrier_kind_;
};
+inline bool operator==(const StoreRepresentation& rep1,
+ const StoreRepresentation& rep2) {
+ return rep1.machine_type() == rep2.machine_type() &&
+ rep1.write_barrier_kind() == rep2.write_barrier_kind();
+}
+
+inline bool operator!=(const StoreRepresentation& rep1,
+ const StoreRepresentation& rep2) {
+ return !(rep1 == rep2);
+}
+
+OStream& operator<<(OStream& os, const StoreRepresentation& rep);
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
-class MachineOperatorBuilder {
+class MachineOperatorBuilder FINAL {
public:
- explicit MachineOperatorBuilder(Zone* zone, MachineType word = pointer_rep())
- : zone_(zone), word_(word) {
- CHECK(word == kMachineWord32 || word == kMachineWord64);
- }
-
-#define SIMPLE(name, properties, inputs, outputs) \
- return new (zone_) \
- SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
-
-#define OP1(name, ptype, pname, properties, inputs, outputs) \
- return new (zone_) \
- Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
- inputs, outputs, #name, pname)
-
-#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
-#define BINOP_O(name) SIMPLE(name, Operator::kPure, 2, 2)
-#define BINOP_C(name) \
- SIMPLE(name, Operator::kCommutative | Operator::kPure, 2, 1)
-#define BINOP_AC(name) \
- SIMPLE(name, \
- Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
- 1)
-#define BINOP_ACO(name) \
- SIMPLE(name, \
- Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
- 2)
-#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
-
-#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
-
- Operator* Load(MachineType rep) { // load [base + index]
- OP1(Load, MachineType, rep, Operator::kNoWrite, 2, 1);
- }
- // store [base + index], value
- Operator* Store(MachineType rep, WriteBarrierKind kind) {
- StoreRepresentation store_rep = {rep, kind};
- OP1(Store, StoreRepresentation, store_rep, Operator::kNoRead, 3, 0);
- }
-
- Operator* WordAnd() { WORD_SIZE(And); }
- Operator* WordOr() { WORD_SIZE(Or); }
- Operator* WordXor() { WORD_SIZE(Xor); }
- Operator* WordShl() { WORD_SIZE(Shl); }
- Operator* WordShr() { WORD_SIZE(Shr); }
- Operator* WordSar() { WORD_SIZE(Sar); }
- Operator* WordEqual() { WORD_SIZE(Equal); }
-
- Operator* Word32And() { BINOP_AC(Word32And); }
- Operator* Word32Or() { BINOP_AC(Word32Or); }
- Operator* Word32Xor() { BINOP_AC(Word32Xor); }
- Operator* Word32Shl() { BINOP(Word32Shl); }
- Operator* Word32Shr() { BINOP(Word32Shr); }
- Operator* Word32Sar() { BINOP(Word32Sar); }
- Operator* Word32Equal() { BINOP_C(Word32Equal); }
-
- Operator* Word64And() { BINOP_AC(Word64And); }
- Operator* Word64Or() { BINOP_AC(Word64Or); }
- Operator* Word64Xor() { BINOP_AC(Word64Xor); }
- Operator* Word64Shl() { BINOP(Word64Shl); }
- Operator* Word64Shr() { BINOP(Word64Shr); }
- Operator* Word64Sar() { BINOP(Word64Sar); }
- Operator* Word64Equal() { BINOP_C(Word64Equal); }
-
- Operator* Int32Add() { BINOP_AC(Int32Add); }
- Operator* Int32AddWithOverflow() { BINOP_ACO(Int32AddWithOverflow); }
- Operator* Int32Sub() { BINOP(Int32Sub); }
- Operator* Int32SubWithOverflow() { BINOP_O(Int32SubWithOverflow); }
- Operator* Int32Mul() { BINOP_AC(Int32Mul); }
- Operator* Int32Div() { BINOP(Int32Div); }
- Operator* Int32UDiv() { BINOP(Int32UDiv); }
- Operator* Int32Mod() { BINOP(Int32Mod); }
- Operator* Int32UMod() { BINOP(Int32UMod); }
- Operator* Int32LessThan() { BINOP(Int32LessThan); }
- Operator* Int32LessThanOrEqual() { BINOP(Int32LessThanOrEqual); }
- Operator* Uint32LessThan() { BINOP(Uint32LessThan); }
- Operator* Uint32LessThanOrEqual() { BINOP(Uint32LessThanOrEqual); }
-
- Operator* Int64Add() { BINOP_AC(Int64Add); }
- Operator* Int64Sub() { BINOP(Int64Sub); }
- Operator* Int64Mul() { BINOP_AC(Int64Mul); }
- Operator* Int64Div() { BINOP(Int64Div); }
- Operator* Int64UDiv() { BINOP(Int64UDiv); }
- Operator* Int64Mod() { BINOP(Int64Mod); }
- Operator* Int64UMod() { BINOP(Int64UMod); }
- Operator* Int64LessThan() { BINOP(Int64LessThan); }
- Operator* Int64LessThanOrEqual() { BINOP(Int64LessThanOrEqual); }
-
- Operator* ConvertInt32ToInt64() { UNOP(ConvertInt32ToInt64); }
- Operator* ConvertInt64ToInt32() { UNOP(ConvertInt64ToInt32); }
-
- // Convert representation of integers between float64 and int32/uint32.
- // The precise rounding mode and handling of out of range inputs are *not*
- // defined for these operators, since they are intended only for use with
- // integers.
- // TODO(titzer): rename ConvertXXX to ChangeXXX in machine operators.
- Operator* ChangeInt32ToFloat64() { UNOP(ChangeInt32ToFloat64); }
- Operator* ChangeUint32ToFloat64() { UNOP(ChangeUint32ToFloat64); }
- Operator* ChangeFloat64ToInt32() { UNOP(ChangeFloat64ToInt32); }
- Operator* ChangeFloat64ToUint32() { UNOP(ChangeFloat64ToUint32); }
+ explicit MachineOperatorBuilder(MachineType word = kMachPtr);
+
+ const Operator* Word32And();
+ const Operator* Word32Or();
+ const Operator* Word32Xor();
+ const Operator* Word32Shl();
+ const Operator* Word32Shr();
+ const Operator* Word32Sar();
+ const Operator* Word32Ror();
+ const Operator* Word32Equal();
+
+ const Operator* Word64And();
+ const Operator* Word64Or();
+ const Operator* Word64Xor();
+ const Operator* Word64Shl();
+ const Operator* Word64Shr();
+ const Operator* Word64Sar();
+ const Operator* Word64Ror();
+ const Operator* Word64Equal();
+
+ const Operator* Int32Add();
+ const Operator* Int32AddWithOverflow();
+ const Operator* Int32Sub();
+ const Operator* Int32SubWithOverflow();
+ const Operator* Int32Mul();
+ const Operator* Int32Div();
+ const Operator* Int32UDiv();
+ const Operator* Int32Mod();
+ const Operator* Int32UMod();
+ const Operator* Int32LessThan();
+ const Operator* Int32LessThanOrEqual();
+ const Operator* Uint32LessThan();
+ const Operator* Uint32LessThanOrEqual();
+
+ const Operator* Int64Add();
+ const Operator* Int64Sub();
+ const Operator* Int64Mul();
+ const Operator* Int64Div();
+ const Operator* Int64UDiv();
+ const Operator* Int64Mod();
+ const Operator* Int64UMod();
+ const Operator* Int64LessThan();
+ const Operator* Int64LessThanOrEqual();
+
+ // These operators change the representation of numbers while preserving the
+ // value of the number. Narrowing operators assume the input is representable
+ // in the target type and are *not* defined for other inputs.
+ // Use narrowing change operators only when there is a static guarantee that
+ // the input value is representable in the target value.
+ const Operator* ChangeFloat32ToFloat64();
+ const Operator* ChangeFloat64ToInt32(); // narrowing
+ const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* ChangeInt32ToFloat64();
+ const Operator* ChangeInt32ToInt64();
+ const Operator* ChangeUint32ToFloat64();
+ const Operator* ChangeUint32ToUint64();
+
+ // These operators truncate numbers, both changing the representation of
+ // the number and mapping multiple input values onto the same output value.
+ const Operator* TruncateFloat64ToFloat32();
+ const Operator* TruncateFloat64ToInt32(); // JavaScript semantics.
+ const Operator* TruncateInt64ToInt32();
// Floating point operators always operate with IEEE 754 round-to-nearest.
- Operator* Float64Add() { BINOP_C(Float64Add); }
- Operator* Float64Sub() { BINOP(Float64Sub); }
- Operator* Float64Mul() { BINOP_C(Float64Mul); }
- Operator* Float64Div() { BINOP(Float64Div); }
- Operator* Float64Mod() { BINOP(Float64Mod); }
+ const Operator* Float64Add();
+ const Operator* Float64Sub();
+ const Operator* Float64Mul();
+ const Operator* Float64Div();
+ const Operator* Float64Mod();
+ const Operator* Float64Sqrt();
// Floating point comparisons complying to IEEE 754.
- Operator* Float64Equal() { BINOP_C(Float64Equal); }
- Operator* Float64LessThan() { BINOP(Float64LessThan); }
- Operator* Float64LessThanOrEqual() { BINOP(Float64LessThanOrEqual); }
+ const Operator* Float64Equal();
+ const Operator* Float64LessThan();
+ const Operator* Float64LessThanOrEqual();
- inline bool is32() const { return word_ == kMachineWord32; }
- inline bool is64() const { return word_ == kMachineWord64; }
- inline MachineType word() const { return word_; }
+ // load [base + index]
+ const Operator* Load(LoadRepresentation rep);
- static inline MachineType pointer_rep() {
- return kPointerSize == 8 ? kMachineWord64 : kMachineWord32;
+ // store [base + index], value
+ const Operator* Store(StoreRepresentation rep);
+
+ // Target machine word-size assumed by this builder.
+ bool Is32() const { return word() == kRepWord32; }
+ bool Is64() const { return word() == kRepWord64; }
+ MachineType word() const { return word_; }
+
+// Pseudo operators that translate to 32/64-bit operators depending on the
+// word-size of the target machine assumed by this builder.
+#define PSEUDO_OP_LIST(V) \
+ V(Word, And) \
+ V(Word, Or) \
+ V(Word, Xor) \
+ V(Word, Shl) \
+ V(Word, Shr) \
+ V(Word, Sar) \
+ V(Word, Ror) \
+ V(Word, Equal) \
+ V(Int, Add) \
+ V(Int, Sub) \
+ V(Int, Mul) \
+ V(Int, Div) \
+ V(Int, UDiv) \
+ V(Int, Mod) \
+ V(Int, UMod) \
+ V(Int, LessThan) \
+ V(Int, LessThanOrEqual)
+#define PSEUDO_OP(Prefix, Suffix) \
+ const Operator* Prefix##Suffix() { \
+ return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
}
-
-#undef WORD_SIZE
-#undef UNOP
-#undef BINOP
-#undef OP1
-#undef SIMPLE
+ PSEUDO_OP_LIST(PSEUDO_OP)
+#undef PSEUDO_OP
+#undef PSEUDO_OP_LIST
private:
- Zone* zone_;
- MachineType word_;
+ const MachineOperatorBuilderImpl& impl_;
+ const MachineType word_;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_MACHINE_OPERATOR_H_
diff --git a/deps/v8/src/compiler/machine-type.cc b/deps/v8/src/compiler/machine-type.cc
new file mode 100644
index 0000000000..94aa124e03
--- /dev/null
+++ b/deps/v8/src/compiler/machine-type.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-type.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define PRINT(bit) \
+ if (type & bit) { \
+ if (before) os << "|"; \
+ os << #bit; \
+ before = true; \
+ }
+
+
+OStream& operator<<(OStream& os, const MachineType& type) {
+ bool before = false;
+ PRINT(kRepBit);
+ PRINT(kRepWord8);
+ PRINT(kRepWord16);
+ PRINT(kRepWord32);
+ PRINT(kRepWord64);
+ PRINT(kRepFloat32);
+ PRINT(kRepFloat64);
+ PRINT(kRepTagged);
+
+ PRINT(kTypeBool);
+ PRINT(kTypeInt32);
+ PRINT(kTypeUint32);
+ PRINT(kTypeInt64);
+ PRINT(kTypeUint64);
+ PRINT(kTypeNumber);
+ PRINT(kTypeAny);
+ return os;
+}
+
+
+#undef PRINT
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/machine-type.h b/deps/v8/src/compiler/machine-type.h
index 716ca2236d..88b482c804 100644
--- a/deps/v8/src/compiler/machine-type.h
+++ b/deps/v8/src/compiler/machine-type.h
@@ -5,32 +5,169 @@
#ifndef V8_COMPILER_MACHINE_TYPE_H_
#define V8_COMPILER_MACHINE_TYPE_H_
+#include "src/base/bits.h"
+#include "src/globals.h"
+#include "src/zone.h"
+
namespace v8 {
namespace internal {
+
+class OStream;
+
namespace compiler {
-// An enumeration of the storage representations at the machine level.
-// - Words are uninterpreted bits of a given fixed size that can be used
-// to store integers and pointers. They are normally allocated to general
-// purpose registers by the backend and are not tracked for GC.
-// - Floats are bits of a given fixed size that are used to store floating
-// point numbers. They are normally allocated to the floating point
-// registers of the machine and are not tracked for the GC.
-// - Tagged values are the size of a reference into the heap and can store
-// small words or references into the heap using a language and potentially
-// machine-dependent tagging scheme. These values are tracked by the code
-// generator for precise GC.
+// Machine-level types and representations.
+// TODO(titzer): Use the real type system instead of MachineType.
enum MachineType {
- kMachineWord8,
- kMachineWord16,
- kMachineWord32,
- kMachineWord64,
- kMachineFloat64,
- kMachineTagged,
- kMachineLast
+ // Representations.
+ kRepBit = 1 << 0,
+ kRepWord8 = 1 << 1,
+ kRepWord16 = 1 << 2,
+ kRepWord32 = 1 << 3,
+ kRepWord64 = 1 << 4,
+ kRepFloat32 = 1 << 5,
+ kRepFloat64 = 1 << 6,
+ kRepTagged = 1 << 7,
+
+ // Types.
+ kTypeBool = 1 << 8,
+ kTypeInt32 = 1 << 9,
+ kTypeUint32 = 1 << 10,
+ kTypeInt64 = 1 << 11,
+ kTypeUint64 = 1 << 12,
+ kTypeNumber = 1 << 13,
+ kTypeAny = 1 << 14,
+
+ // Machine types.
+ kMachNone = 0,
+ kMachFloat32 = kRepFloat32 | kTypeNumber,
+ kMachFloat64 = kRepFloat64 | kTypeNumber,
+ kMachInt8 = kRepWord8 | kTypeInt32,
+ kMachUint8 = kRepWord8 | kTypeUint32,
+ kMachInt16 = kRepWord16 | kTypeInt32,
+ kMachUint16 = kRepWord16 | kTypeUint32,
+ kMachInt32 = kRepWord32 | kTypeInt32,
+ kMachUint32 = kRepWord32 | kTypeUint32,
+ kMachInt64 = kRepWord64 | kTypeInt64,
+ kMachUint64 = kRepWord64 | kTypeUint64,
+ kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
+ kMachAnyTagged = kRepTagged | kTypeAny
};
+
+OStream& operator<<(OStream& os, const MachineType& type);
+
+typedef uint16_t MachineTypeUnion;
+
+// Globally useful machine types and constants.
+const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
+ kRepWord32 | kRepWord64 | kRepFloat32 |
+ kRepFloat64 | kRepTagged;
+const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
+ kTypeInt64 | kTypeUint64 | kTypeNumber |
+ kTypeAny;
+
+// Gets only the type of the given type.
+inline MachineType TypeOf(MachineType machine_type) {
+ int result = machine_type & kTypeMask;
+ return static_cast<MachineType>(result);
}
+
+// Gets only the representation of the given type.
+inline MachineType RepresentationOf(MachineType machine_type) {
+ int result = machine_type & kRepMask;
+ CHECK(base::bits::IsPowerOfTwo32(result));
+ return static_cast<MachineType>(result);
+}
+
+// Gets the element size in bytes of the machine type.
+inline int ElementSizeOf(MachineType machine_type) {
+ switch (RepresentationOf(machine_type)) {
+ case kRepBit:
+ case kRepWord8:
+ return 1;
+ case kRepWord16:
+ return 2;
+ case kRepWord32:
+ case kRepFloat32:
+ return 4;
+ case kRepWord64:
+ case kRepFloat64:
+ return 8;
+ case kRepTagged:
+ return kPointerSize;
+ default:
+ UNREACHABLE();
+ return kPointerSize;
+ }
}
-} // namespace v8::internal::compiler
+
+// Describes the inputs and outputs of a function or call.
+template <typename T>
+class Signature : public ZoneObject {
+ public:
+ Signature(size_t return_count, size_t parameter_count, T* reps)
+ : return_count_(return_count),
+ parameter_count_(parameter_count),
+ reps_(reps) {}
+
+ size_t return_count() const { return return_count_; }
+ size_t parameter_count() const { return parameter_count_; }
+
+ T GetParam(size_t index) const {
+ DCHECK(index < parameter_count_);
+ return reps_[return_count_ + index];
+ }
+
+ T GetReturn(size_t index = 0) const {
+ DCHECK(index < return_count_);
+ return reps_[index];
+ }
+
+ // For incrementally building signatures.
+ class Builder {
+ public:
+ Builder(Zone* zone, size_t return_count, size_t parameter_count)
+ : return_count_(return_count),
+ parameter_count_(parameter_count),
+ zone_(zone),
+ rcursor_(0),
+ pcursor_(0),
+ buffer_(zone->NewArray<T>(
+ static_cast<int>(return_count + parameter_count))) {}
+
+ const size_t return_count_;
+ const size_t parameter_count_;
+
+ void AddReturn(T val) {
+ DCHECK(rcursor_ < return_count_);
+ buffer_[rcursor_++] = val;
+ }
+ void AddParam(T val) {
+ DCHECK(pcursor_ < parameter_count_);
+ buffer_[return_count_ + pcursor_++] = val;
+ }
+ Signature<T>* Build() {
+ DCHECK(rcursor_ == return_count_);
+ DCHECK(pcursor_ == parameter_count_);
+ return new (zone_) Signature<T>(return_count_, parameter_count_, buffer_);
+ }
+
+ private:
+ Zone* zone_;
+ size_t rcursor_;
+ size_t pcursor_;
+ T* buffer_;
+ };
+
+ protected:
+ size_t return_count_;
+ size_t parameter_count_;
+ T* reps_;
+};
+
+typedef Signature<MachineType> MachineSignature;
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_MACHINE_TYPE_H_
diff --git a/deps/v8/src/compiler/node-aux-data-inl.h b/deps/v8/src/compiler/node-aux-data-inl.h
index 679320ab6f..79f1abfe95 100644
--- a/deps/v8/src/compiler/node-aux-data-inl.h
+++ b/deps/v8/src/compiler/node-aux-data-inl.h
@@ -15,7 +15,7 @@ namespace compiler {
template <class T>
NodeAuxData<T>::NodeAuxData(Zone* zone)
- : aux_data_(ZoneAllocator(zone)) {}
+ : aux_data_(zone) {}
template <class T>
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index 1e836338a9..7acce33949 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_NODE_AUX_DATA_H_
#define V8_COMPILER_NODE_AUX_DATA_H_
-#include <vector>
-
-#include "src/zone-allocator.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -26,10 +24,7 @@ class NodeAuxData {
inline T Get(Node* node);
private:
- typedef zone_allocator<T> ZoneAllocator;
- typedef std::vector<T, ZoneAllocator> TZoneVector;
-
- TZoneVector aux_data_;
+ ZoneVector<T> aux_data_;
};
}
}
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index c3ee58c5a2..7cda167bac 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -31,7 +31,7 @@ inline int32_t NodeCacheHash(int64_t key) {
template <>
inline int32_t NodeCacheHash(double key) {
- return ComputeLongHash(BitCast<int64_t>(key));
+ return ComputeLongHash(bit_cast<int64_t>(key));
}
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 3b34d07c08..6019cba3a5 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -5,7 +5,8 @@
#ifndef V8_COMPILER_NODE_MATCHERS_H_
#define V8_COMPILER_NODE_MATCHERS_H_
-#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
namespace v8 {
namespace internal {
@@ -16,7 +17,7 @@ struct NodeMatcher {
explicit NodeMatcher(Node* node) : node_(node) {}
Node* node() const { return node_; }
- Operator* op() const { return node()->op(); }
+ const Operator* op() const { return node()->op(); }
IrOpcode::Value opcode() const { return node()->opcode(); }
bool HasProperty(Operator::Property property) const {
@@ -35,27 +36,27 @@ struct NodeMatcher {
// A pattern matcher for abitrary value constants.
-template <typename T>
+template <typename T, IrOpcode::Value kOpcode>
struct ValueMatcher : public NodeMatcher {
explicit ValueMatcher(Node* node)
- : NodeMatcher(node),
- value_(),
- has_value_(CommonOperatorTraits<T>::HasValue(node->op())) {
- if (has_value_) value_ = CommonOperatorTraits<T>::ValueOf(node->op());
+ : NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
+ if (has_value_) {
+ value_ = OpParameter<T>(node);
+ }
}
bool HasValue() const { return has_value_; }
- T Value() const {
+ const T& Value() const {
DCHECK(HasValue());
return value_;
}
- bool Is(T value) const {
- return HasValue() && CommonOperatorTraits<T>::Equals(Value(), value);
+ bool Is(const T& value) const {
+ return this->HasValue() && this->Value() == value;
}
- bool IsInRange(T low, T high) const {
- return HasValue() && low <= value_ && value_ <= high;
+ bool IsInRange(const T& low, const T& high) const {
+ return this->HasValue() && low <= this->Value() && this->Value() <= high;
}
private:
@@ -65,9 +66,9 @@ struct ValueMatcher : public NodeMatcher {
// A pattern matcher for integer constants.
-template <typename T>
-struct IntMatcher V8_FINAL : public ValueMatcher<T> {
- explicit IntMatcher(Node* node) : ValueMatcher<T>(node) {}
+template <typename T, IrOpcode::Value kOpcode>
+struct IntMatcher FINAL : public ValueMatcher<T, kOpcode> {
+ explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool IsPowerOf2() const {
return this->HasValue() && this->Value() > 0 &&
@@ -75,28 +76,39 @@ struct IntMatcher V8_FINAL : public ValueMatcher<T> {
}
};
-typedef IntMatcher<int32_t> Int32Matcher;
-typedef IntMatcher<uint32_t> Uint32Matcher;
-typedef IntMatcher<int64_t> Int64Matcher;
-typedef IntMatcher<uint64_t> Uint64Matcher;
+typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
+typedef IntMatcher<uint32_t, IrOpcode::kInt32Constant> Uint32Matcher;
+typedef IntMatcher<int64_t, IrOpcode::kInt64Constant> Int64Matcher;
+typedef IntMatcher<uint64_t, IrOpcode::kInt64Constant> Uint64Matcher;
// A pattern matcher for floating point constants.
-template <typename T>
-struct FloatMatcher V8_FINAL : public ValueMatcher<T> {
- explicit FloatMatcher(Node* node) : ValueMatcher<T>(node) {}
+template <typename T, IrOpcode::Value kOpcode>
+struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
+ explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
};
-typedef FloatMatcher<double> Float64Matcher;
+typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
+typedef FloatMatcher<double, IrOpcode::kFloat64Constant> Float64Matcher;
+typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
+
+
+// A pattern matcher for heap object constants.
+template <typename T>
+struct HeapObjectMatcher FINAL
+ : public ValueMatcher<Unique<T>, IrOpcode::kHeapConstant> {
+ explicit HeapObjectMatcher(Node* node)
+ : ValueMatcher<Unique<T>, IrOpcode::kHeapConstant>(node) {}
+};
// For shorter pattern matching code, this struct matches both the left and
// right hand sides of a binary operation and can put constants on the right
// if they appear on the left hand side of a commutative operation.
template <typename Left, typename Right>
-struct BinopMatcher V8_FINAL : public NodeMatcher {
+struct BinopMatcher FINAL : public NodeMatcher {
explicit BinopMatcher(Node* node)
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
@@ -126,8 +138,99 @@ typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher;
typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
-}
-}
-} // namespace v8::internal::compiler
+
+
+// Fairly intel-specify node matcher used for matching scale factors in
+// addressing modes.
+// Matches nodes of form [x * N] for N in {1,2,4,8}
+class ScaleFactorMatcher : public NodeMatcher {
+ public:
+ explicit ScaleFactorMatcher(Node* node)
+ : NodeMatcher(node), left_(NULL), power_(0) {
+ Match();
+ }
+
+ bool Matches() { return left_ != NULL; }
+ int Power() {
+ DCHECK(Matches());
+ return power_;
+ }
+ Node* Left() {
+ DCHECK(Matches());
+ return left_;
+ }
+
+ private:
+ void Match() {
+ if (opcode() != IrOpcode::kInt32Mul) return;
+ Int32BinopMatcher m(node());
+ if (!m.right().HasValue()) return;
+ int32_t value = m.right().Value();
+ switch (value) {
+ case 8:
+ power_++; // Fall through.
+ case 4:
+ power_++; // Fall through.
+ case 2:
+ power_++; // Fall through.
+ case 1:
+ break;
+ default:
+ return;
+ }
+ left_ = m.left().node();
+ }
+
+ Node* left_;
+ int power_;
+};
+
+
+// Fairly intel-specify node matcher used for matching index and displacement
+// operands in addressing modes.
+// Matches nodes of form:
+// [x * N]
+// [x * N + K]
+// [x + K]
+// [x] -- fallback case
+// for N in {1,2,4,8} and K int32_t
+class IndexAndDisplacementMatcher : public NodeMatcher {
+ public:
+ explicit IndexAndDisplacementMatcher(Node* node)
+ : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
+ Match();
+ }
+
+ Node* index_node() { return index_node_; }
+ int displacement() { return displacement_; }
+ int power() { return power_; }
+
+ private:
+ void Match() {
+ if (opcode() == IrOpcode::kInt32Add) {
+ // Assume reduction has put constant on the right.
+ Int32BinopMatcher m(node());
+ if (m.right().HasValue()) {
+ displacement_ = m.right().Value();
+ index_node_ = m.left().node();
+ }
+ }
+ // Test scale factor.
+ ScaleFactorMatcher scale_matcher(index_node_);
+ if (scale_matcher.Matches()) {
+ index_node_ = scale_matcher.Left();
+ power_ = scale_matcher.Power();
+ }
+ }
+
+ Node* index_node_;
+ int displacement_;
+ int power_;
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_NODE_MATCHERS_H_
diff --git a/deps/v8/src/compiler/node-properties-inl.h b/deps/v8/src/compiler/node-properties-inl.h
index 2d63b0cc1b..3f6d53105b 100644
--- a/deps/v8/src/compiler/node-properties-inl.h
+++ b/deps/v8/src/compiler/node-properties-inl.h
@@ -8,6 +8,7 @@
#include "src/v8.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
@@ -29,10 +30,14 @@ inline int NodeProperties::FirstContextIndex(Node* node) {
return PastValueIndex(node);
}
-inline int NodeProperties::FirstEffectIndex(Node* node) {
+inline int NodeProperties::FirstFrameStateIndex(Node* node) {
return PastContextIndex(node);
}
+inline int NodeProperties::FirstEffectIndex(Node* node) {
+ return PastFrameStateIndex(node);
+}
+
inline int NodeProperties::FirstControlIndex(Node* node) {
return PastEffectIndex(node);
}
@@ -48,6 +53,11 @@ inline int NodeProperties::PastContextIndex(Node* node) {
OperatorProperties::GetContextInputCount(node->op());
}
+inline int NodeProperties::PastFrameStateIndex(Node* node) {
+ return FirstFrameStateIndex(node) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+}
+
inline int NodeProperties::PastEffectIndex(Node* node) {
return FirstEffectIndex(node) +
OperatorProperties::GetEffectInputCount(node->op());
@@ -73,6 +83,11 @@ inline Node* NodeProperties::GetContextInput(Node* node) {
return node->InputAt(FirstContextIndex(node));
}
+inline Node* NodeProperties::GetFrameStateInput(Node* node) {
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ return node->InputAt(FirstFrameStateIndex(node));
+}
+
inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
DCHECK(0 <= index &&
index < OperatorProperties::GetEffectInputCount(node->op()));
@@ -85,6 +100,10 @@ inline Node* NodeProperties::GetControlInput(Node* node, int index) {
return node->InputAt(FirstControlIndex(node) + index);
}
+inline int NodeProperties::GetFrameStateIndex(Node* node) {
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ return FirstFrameStateIndex(node);
+}
// -----------------------------------------------------------------------------
// Edge kinds.
@@ -143,11 +162,39 @@ inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
}
+inline void NodeProperties::ReplaceFrameStateInput(Node* node,
+ Node* frame_state) {
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
+}
+
inline void NodeProperties::RemoveNonValueInputs(Node* node) {
node->TrimInputCount(OperatorProperties::GetValueInputCount(node->op()));
}
+// Replace value uses of {node} with {value} and effect uses of {node} with
+// {effect}. If {effect == NULL}, then use the effect input to {node}.
+inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
+ Node* effect) {
+ DCHECK(!OperatorProperties::HasControlOutput(node->op()));
+ if (effect == NULL && OperatorProperties::HasEffectInput(node->op())) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+
+ // Requires distinguishing between value and effect edges.
+ UseIter iter = node->uses().begin();
+ while (iter != node->uses().end()) {
+ if (NodeProperties::IsEffectEdge(iter.edge())) {
+ DCHECK_NE(NULL, effect);
+ iter = iter.UpdateToAndIncrement(effect);
+ } else {
+ iter = iter.UpdateToAndIncrement(value);
+ }
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Type Bounds.
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 6088a0a3a0..94bd731ba7 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -19,9 +19,12 @@ class NodeProperties {
public:
static inline Node* GetValueInput(Node* node, int index);
static inline Node* GetContextInput(Node* node);
+ static inline Node* GetFrameStateInput(Node* node);
static inline Node* GetEffectInput(Node* node, int index = 0);
static inline Node* GetControlInput(Node* node, int index = 0);
+ static inline int GetFrameStateIndex(Node* node);
+
static inline bool IsValueEdge(Node::Edge edge);
static inline bool IsContextEdge(Node::Edge edge);
static inline bool IsEffectEdge(Node::Edge edge);
@@ -32,18 +35,22 @@ class NodeProperties {
static inline void ReplaceControlInput(Node* node, Node* control);
static inline void ReplaceEffectInput(Node* node, Node* effect,
int index = 0);
+ static inline void ReplaceFrameStateInput(Node* node, Node* frame_state);
static inline void RemoveNonValueInputs(Node* node);
+ static inline void ReplaceWithValue(Node* node, Node* value,
+ Node* effect = NULL);
static inline Bounds GetBounds(Node* node);
static inline void SetBounds(Node* node, Bounds bounds);
- private:
static inline int FirstValueIndex(Node* node);
static inline int FirstContextIndex(Node* node);
+ static inline int FirstFrameStateIndex(Node* node);
static inline int FirstEffectIndex(Node* node);
static inline int FirstControlIndex(Node* node);
static inline int PastValueIndex(Node* node);
static inline int PastContextIndex(Node* node);
+ static inline int PastFrameStateIndex(Node* node);
static inline int PastEffectIndex(Node* node);
static inline int PastControlIndex(Node* node);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 4cb5748b40..7df736ee12 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -10,23 +10,31 @@ namespace v8 {
namespace internal {
namespace compiler {
-void Node::CollectProjections(int projection_count, Node** projections) {
- for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
+void Node::Kill() {
+ DCHECK_NOT_NULL(op());
+ RemoveAllInputs();
+ DCHECK(uses().empty());
+}
+
+
+void Node::CollectProjections(NodeVector* projections) {
+ for (size_t i = 0; i < projections->size(); i++) {
+ (*projections)[i] = NULL;
+ }
for (UseIter i = uses().begin(); i != uses().end(); ++i) {
if ((*i)->opcode() != IrOpcode::kProjection) continue;
- int32_t index = OpParameter<int32_t>(*i);
- DCHECK_GE(index, 0);
- DCHECK_LT(index, projection_count);
- DCHECK_EQ(NULL, projections[index]);
- projections[index] = *i;
+ size_t index = OpParameter<size_t>(*i);
+ DCHECK_LT(index, projections->size());
+ DCHECK_EQ(NULL, (*projections)[index]);
+ (*projections)[index] = *i;
}
}
-Node* Node::FindProjection(int32_t projection_index) {
+Node* Node::FindProjection(size_t projection_index) {
for (UseIter i = uses().begin(); i != uses().end(); ++i) {
if ((*i)->opcode() == IrOpcode::kProjection &&
- OpParameter<int32_t>(*i) == projection_index) {
+ OpParameter<size_t>(*i) == projection_index) {
return *i;
}
}
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index ddca510a0e..c3f5a532c6 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -23,8 +23,8 @@ namespace compiler {
class NodeData {
public:
- Operator* op() const { return op_; }
- void set_op(Operator* op) { op_ = op; }
+ const Operator* op() const { return op_; }
+ void set_op(const Operator* op) { op_ = op; }
IrOpcode::Value opcode() const {
DCHECK(op_->opcode() <= IrOpcode::kLast);
@@ -34,7 +34,7 @@ class NodeData {
Bounds bounds() { return bounds_; }
protected:
- Operator* op_;
+ const Operator* op_;
Bounds bounds_;
explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {}
@@ -47,36 +47,34 @@ class NodeData {
// during compilation, e.g. during lowering passes. Other information that
// needs to be associated with Nodes during compilation must be stored
// out-of-line indexed by the Node's id.
-class Node : public GenericNode<NodeData, Node> {
+class Node FINAL : public GenericNode<NodeData, Node> {
public:
Node(GenericGraphBase* graph, int input_count)
: GenericNode<NodeData, Node>(graph, input_count) {}
- void Initialize(Operator* op) { set_op(op); }
+ void Initialize(const Operator* op) { set_op(op); }
- void CollectProjections(int projection_count, Node** projections);
- Node* FindProjection(int32_t projection_index);
+ bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+ void Kill();
+
+ void CollectProjections(ZoneVector<Node*>* projections);
+ Node* FindProjection(size_t projection_index);
};
OStream& operator<<(OStream& os, const Node& n);
typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
-typedef zone_allocator<Node*> NodePtrZoneAllocator;
-
-typedef std::set<Node*, std::less<Node*>, NodePtrZoneAllocator> NodeSet;
+typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
typedef NodeSet::iterator NodeSetIter;
typedef NodeSet::reverse_iterator NodeSetRIter;
-typedef std::deque<Node*, NodePtrZoneAllocator> NodeDeque;
-typedef NodeDeque::iterator NodeDequeIter;
-
-typedef std::vector<Node*, NodePtrZoneAllocator> NodeVector;
+typedef ZoneVector<Node*> NodeVector;
typedef NodeVector::iterator NodeVectorIter;
+typedef NodeVector::const_iterator NodeVectorConstIter;
typedef NodeVector::reverse_iterator NodeVectorRIter;
-typedef zone_allocator<NodeVector> ZoneNodeVectorAllocator;
-typedef std::vector<NodeVector, ZoneNodeVectorAllocator> NodeVectorVector;
+typedef ZoneVector<NodeVector> NodeVectorVector;
typedef NodeVectorVector::iterator NodeVectorVectorIter;
typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
@@ -85,11 +83,12 @@ typedef Node::Inputs::iterator InputIter;
// Helper to extract parameters from Operator1<*> nodes.
template <typename T>
-static inline T OpParameter(Node* node) {
- return reinterpret_cast<Operator1<T>*>(node->op())->parameter();
+static inline const T& OpParameter(const Node* node) {
+ return OpParameter<T>(node->op());
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_NODE_H_
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 1371bfd16b..e210abd6b9 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -6,25 +6,26 @@
#define V8_COMPILER_OPCODES_H_
// Opcodes for control operators.
-#define CONTROL_OP_LIST(V) \
- V(Start) \
+#define INNER_CONTROL_OP_LIST(V) \
V(Dead) \
V(Loop) \
- V(End) \
V(Branch) \
V(IfTrue) \
V(IfFalse) \
V(Merge) \
V(Return) \
- V(Throw) \
- V(Continuation) \
- V(LazyDeoptimization) \
- V(Deoptimize)
+ V(Throw)
+
+#define CONTROL_OP_LIST(V) \
+ INNER_CONTROL_OP_LIST(V) \
+ V(Start) \
+ V(End)
// Opcodes for common operators.
#define LEAF_OP_LIST(V) \
V(Int32Constant) \
V(Int64Constant) \
+ V(Float32Constant) \
V(Float64Constant) \
V(ExternalConstant) \
V(NumberConstant) \
@@ -33,6 +34,9 @@
#define INNER_OP_LIST(V) \
V(Phi) \
V(EffectPhi) \
+ V(ControlEffect) \
+ V(ValueEffect) \
+ V(Finish) \
V(FrameState) \
V(StateValues) \
V(Call) \
@@ -83,7 +87,8 @@
V(JSToName) \
V(JSToObject)
-#define JS_OTHER_UNOP_LIST(V) V(JSTypeOf)
+#define JS_OTHER_UNOP_LIST(V) \
+ V(JSTypeOf)
#define JS_SIMPLE_UNOP_LIST(V) \
JS_LOGIC_UNOP_LIST(V) \
@@ -127,6 +132,7 @@
// Opcodes for VirtuaMachine-level operators.
#define SIMPLIFIED_OP_LIST(V) \
V(BooleanNot) \
+ V(BooleanToNumber) \
V(NumberEqual) \
V(NumberLessThan) \
V(NumberLessThanOrEqual) \
@@ -156,58 +162,65 @@
V(StoreElement)
// Opcodes for Machine-level operators.
-#define MACHINE_OP_LIST(V) \
- V(Load) \
- V(Store) \
- V(Word32And) \
- V(Word32Or) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Equal) \
- V(Word64And) \
- V(Word64Or) \
- V(Word64Xor) \
- V(Word64Shl) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Equal) \
- V(Int32Add) \
- V(Int32AddWithOverflow) \
- V(Int32Sub) \
- V(Int32SubWithOverflow) \
- V(Int32Mul) \
- V(Int32Div) \
- V(Int32UDiv) \
- V(Int32Mod) \
- V(Int32UMod) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(Uint32LessThan) \
- V(Uint32LessThanOrEqual) \
- V(Int64Add) \
- V(Int64Sub) \
- V(Int64Mul) \
- V(Int64Div) \
- V(Int64UDiv) \
- V(Int64Mod) \
- V(Int64UMod) \
- V(Int64LessThan) \
- V(Int64LessThanOrEqual) \
- V(ConvertInt64ToInt32) \
- V(ConvertInt32ToInt64) \
- V(ChangeInt32ToFloat64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Mod) \
- V(Float64Equal) \
- V(Float64LessThan) \
+#define MACHINE_OP_LIST(V) \
+ V(Load) \
+ V(Store) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word32Equal) \
+ V(Word64And) \
+ V(Word64Or) \
+ V(Word64Xor) \
+ V(Word64Shl) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror) \
+ V(Word64Equal) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32SubWithOverflow) \
+ V(Int32Mul) \
+ V(Int32Div) \
+ V(Int32UDiv) \
+ V(Int32Mod) \
+ V(Int32UMod) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
+ V(Int64Add) \
+ V(Int64Sub) \
+ V(Int64Mul) \
+ V(Int64Div) \
+ V(Int64UDiv) \
+ V(Int64Mod) \
+ V(Int64UMod) \
+ V(Int64LessThan) \
+ V(Int64LessThanOrEqual) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateFloat64ToInt32) \
+ V(TruncateInt64ToInt32) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Sqrt) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
V(Float64LessThanOrEqual)
#define VALUE_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operator-properties-inl.h b/deps/v8/src/compiler/operator-properties-inl.h
index 42833fdeb4..9dae10699a 100644
--- a/deps/v8/src/compiler/operator-properties-inl.h
+++ b/deps/v8/src/compiler/operator-properties-inl.h
@@ -5,8 +5,7 @@
#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
-#include "src/v8.h"
-
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator-properties.h"
@@ -15,55 +14,110 @@ namespace v8 {
namespace internal {
namespace compiler {
-inline bool OperatorProperties::HasValueInput(Operator* op) {
+inline bool OperatorProperties::HasValueInput(const Operator* op) {
return OperatorProperties::GetValueInputCount(op) > 0;
}
-inline bool OperatorProperties::HasContextInput(Operator* op) {
+inline bool OperatorProperties::HasContextInput(const Operator* op) {
IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
return IrOpcode::IsJsOpcode(opcode);
}
-inline bool OperatorProperties::HasEffectInput(Operator* op) {
+inline bool OperatorProperties::HasEffectInput(const Operator* op) {
return OperatorProperties::GetEffectInputCount(op) > 0;
}
-inline bool OperatorProperties::HasControlInput(Operator* op) {
+inline bool OperatorProperties::HasControlInput(const Operator* op) {
return OperatorProperties::GetControlInputCount(op) > 0;
}
+inline bool OperatorProperties::HasFrameStateInput(const Operator* op) {
+ if (!FLAG_turbo_deoptimization) {
+ return false;
+ }
+
+ switch (op->opcode()) {
+ case IrOpcode::kFrameState:
+ return true;
+ case IrOpcode::kJSCallRuntime: {
+ Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(op);
+ return Linkage::NeedsFrameState(function);
+ }
+
+ // Strict equality cannot lazily deoptimize.
+ case IrOpcode::kJSStrictEqual:
+ case IrOpcode::kJSStrictNotEqual:
+ return false;
+
+ // Calls
+ case IrOpcode::kJSCallFunction:
+ case IrOpcode::kJSCallConstruct:
+
+ // Compare operations
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSNotEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSGreaterThanOrEqual:
+
+ // Binary operations
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSBitwiseAnd:
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical:
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSSubtract:
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed:
+ return true;
+
+ default:
+ return false;
+ }
+}
-inline int OperatorProperties::GetValueInputCount(Operator* op) {
+inline int OperatorProperties::GetValueInputCount(const Operator* op) {
return op->InputCount();
}
-inline int OperatorProperties::GetContextInputCount(Operator* op) {
+inline int OperatorProperties::GetContextInputCount(const Operator* op) {
return OperatorProperties::HasContextInput(op) ? 1 : 0;
}
-inline int OperatorProperties::GetEffectInputCount(Operator* op) {
- if (op->opcode() == IrOpcode::kEffectPhi) {
- return static_cast<Operator1<int>*>(op)->parameter();
+inline int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
+ return OperatorProperties::HasFrameStateInput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetEffectInputCount(const Operator* op) {
+ if (op->opcode() == IrOpcode::kEffectPhi ||
+ op->opcode() == IrOpcode::kFinish) {
+ return OpParameter<int>(op);
}
if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite))
return 0; // no effects.
return 1;
}
-inline int OperatorProperties::GetControlInputCount(Operator* op) {
+inline int OperatorProperties::GetControlInputCount(const Operator* op) {
switch (op->opcode()) {
case IrOpcode::kPhi:
case IrOpcode::kEffectPhi:
+ case IrOpcode::kControlEffect:
return 1;
#define OPCODE_CASE(x) case IrOpcode::k##x:
CONTROL_OP_LIST(OPCODE_CASE)
#undef OPCODE_CASE
- return static_cast<ControlOperator*>(op)->ControlInputCount();
+ // Control operators are Operator1<int>.
+ return OpParameter<int>(op);
default:
- // If a node can lazily deoptimize, it needs control dependency.
- if (CanLazilyDeoptimize(op)) {
- return 1;
- }
// Operators that have write effects must have a control
// dependency. Effect dependencies only ensure the correct order of
// write/read operations without consideration of control flow. Without an
@@ -74,44 +128,47 @@ inline int OperatorProperties::GetControlInputCount(Operator* op) {
return 0;
}
-inline int OperatorProperties::GetTotalInputCount(Operator* op) {
+inline int OperatorProperties::GetTotalInputCount(const Operator* op) {
return GetValueInputCount(op) + GetContextInputCount(op) +
- GetEffectInputCount(op) + GetControlInputCount(op);
+ GetFrameStateInputCount(op) + GetEffectInputCount(op) +
+ GetControlInputCount(op);
}
// -----------------------------------------------------------------------------
// Output properties.
-inline bool OperatorProperties::HasValueOutput(Operator* op) {
+inline bool OperatorProperties::HasValueOutput(const Operator* op) {
return GetValueOutputCount(op) > 0;
}
-inline bool OperatorProperties::HasEffectOutput(Operator* op) {
- return op->opcode() == IrOpcode::kStart || GetEffectInputCount(op) > 0;
+inline bool OperatorProperties::HasEffectOutput(const Operator* op) {
+ return op->opcode() == IrOpcode::kStart ||
+ op->opcode() == IrOpcode::kControlEffect ||
+ op->opcode() == IrOpcode::kValueEffect ||
+ (op->opcode() != IrOpcode::kFinish && GetEffectInputCount(op) > 0);
}
-inline bool OperatorProperties::HasControlOutput(Operator* op) {
+inline bool OperatorProperties::HasControlOutput(const Operator* op) {
IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
- return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode)) ||
- CanLazilyDeoptimize(op);
+ return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode));
}
-inline int OperatorProperties::GetValueOutputCount(Operator* op) {
+inline int OperatorProperties::GetValueOutputCount(const Operator* op) {
return op->OutputCount();
}
-inline int OperatorProperties::GetEffectOutputCount(Operator* op) {
+inline int OperatorProperties::GetEffectOutputCount(const Operator* op) {
return HasEffectOutput(op) ? 1 : 0;
}
-inline int OperatorProperties::GetControlOutputCount(Operator* node) {
+inline int OperatorProperties::GetControlOutputCount(const Operator* node) {
return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 1
: 0;
}
-inline bool OperatorProperties::IsBasicBlockBegin(Operator* op) {
+inline bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
uint8_t opcode = op->opcode();
return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
@@ -119,73 +176,8 @@ inline bool OperatorProperties::IsBasicBlockBegin(Operator* op) {
opcode == IrOpcode::kIfFalse;
}
-inline bool OperatorProperties::CanBeScheduled(Operator* op) { return true; }
-
-inline bool OperatorProperties::HasFixedSchedulePosition(Operator* op) {
- IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
- return (IrOpcode::IsControlOpcode(opcode)) ||
- opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi ||
- opcode == IrOpcode::kPhi;
-}
-
-inline bool OperatorProperties::IsScheduleRoot(Operator* op) {
- uint8_t opcode = op->opcode();
- return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi ||
- opcode == IrOpcode::kPhi;
-}
-
-inline bool OperatorProperties::CanLazilyDeoptimize(Operator* op) {
- // TODO(jarin) This function allows turning on lazy deoptimization
- // incrementally. It will change as we turn on lazy deopt for
- // more nodes.
-
- if (!FLAG_turbo_deoptimization) {
- return false;
- }
-
- switch (op->opcode()) {
- case IrOpcode::kCall: {
- CallOperator* call_op = reinterpret_cast<CallOperator*>(op);
- CallDescriptor* descriptor = call_op->parameter();
- return descriptor->CanLazilyDeoptimize();
- }
- case IrOpcode::kJSCallRuntime: {
- Runtime::FunctionId function =
- reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
- // TODO(jarin) At the moment, we only support lazy deoptimization for
- // the %DeoptimizeFunction runtime function.
- return function == Runtime::kDeoptimizeFunction;
- }
-
- // JS function calls
- case IrOpcode::kJSCallFunction:
- case IrOpcode::kJSCallConstruct:
-
- // Binary operations
- case IrOpcode::kJSBitwiseOr:
- case IrOpcode::kJSBitwiseXor:
- case IrOpcode::kJSBitwiseAnd:
- case IrOpcode::kJSShiftLeft:
- case IrOpcode::kJSShiftRight:
- case IrOpcode::kJSShiftRightLogical:
- case IrOpcode::kJSAdd:
- case IrOpcode::kJSSubtract:
- case IrOpcode::kJSMultiply:
- case IrOpcode::kJSDivide:
- case IrOpcode::kJSModulus:
- case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSStoreProperty:
- case IrOpcode::kJSLoadNamed:
- case IrOpcode::kJSStoreNamed:
- return true;
-
- default:
- return false;
- }
- return false;
-}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index cbc8ed9af0..718eea0e2a 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
#define V8_COMPILER_OPERATOR_PROPERTIES_H_
-#include "src/v8.h"
-
namespace v8 {
namespace internal {
namespace compiler {
@@ -15,35 +13,32 @@ class Operator;
class OperatorProperties {
public:
- static inline bool HasValueInput(Operator* node);
- static inline bool HasContextInput(Operator* node);
- static inline bool HasEffectInput(Operator* node);
- static inline bool HasControlInput(Operator* node);
-
- static inline int GetValueInputCount(Operator* op);
- static inline int GetContextInputCount(Operator* op);
- static inline int GetEffectInputCount(Operator* op);
- static inline int GetControlInputCount(Operator* op);
- static inline int GetTotalInputCount(Operator* op);
-
- static inline bool HasValueOutput(Operator* op);
- static inline bool HasEffectOutput(Operator* op);
- static inline bool HasControlOutput(Operator* op);
-
- static inline int GetValueOutputCount(Operator* op);
- static inline int GetEffectOutputCount(Operator* op);
- static inline int GetControlOutputCount(Operator* op);
-
- static inline bool IsBasicBlockBegin(Operator* op);
-
- static inline bool CanBeScheduled(Operator* op);
- static inline bool HasFixedSchedulePosition(Operator* op);
- static inline bool IsScheduleRoot(Operator* op);
-
- static inline bool CanLazilyDeoptimize(Operator* op);
+ static inline bool HasValueInput(const Operator* op);
+ static inline bool HasContextInput(const Operator* op);
+ static inline bool HasEffectInput(const Operator* op);
+ static inline bool HasControlInput(const Operator* op);
+ static inline bool HasFrameStateInput(const Operator* op);
+
+ static inline int GetValueInputCount(const Operator* op);
+ static inline int GetContextInputCount(const Operator* op);
+ static inline int GetEffectInputCount(const Operator* op);
+ static inline int GetControlInputCount(const Operator* op);
+ static inline int GetFrameStateInputCount(const Operator* op);
+ static inline int GetTotalInputCount(const Operator* op);
+
+ static inline bool HasValueOutput(const Operator* op);
+ static inline bool HasEffectOutput(const Operator* op);
+ static inline bool HasControlOutput(const Operator* op);
+
+ static inline int GetValueOutputCount(const Operator* op);
+ static inline int GetEffectOutputCount(const Operator* op);
+ static inline int GetControlOutputCount(const Operator* op);
+
+ static inline bool IsBasicBlockBegin(const Operator* op);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_OPERATOR_PROPERTIES_H_
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
new file mode 100644
index 0000000000..35f9c889be
--- /dev/null
+++ b/deps/v8/src/compiler/operator.cc
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Operator::~Operator() {}
+
+
+SimpleOperator::SimpleOperator(Opcode opcode, Properties properties,
+ int input_count, int output_count,
+ const char* mnemonic)
+ : Operator(opcode, properties, mnemonic),
+ input_count_(input_count),
+ output_count_(output_count) {}
+
+
+SimpleOperator::~SimpleOperator() {}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 4294d344fe..5137806f12 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_OPERATOR_H_
#define V8_COMPILER_OPERATOR_H_
-#include "src/v8.h"
-
-#include "src/assembler.h"
+#include "src/base/flags.h"
#include "src/ostreams.h"
#include "src/unique.h"
@@ -29,9 +27,7 @@ namespace compiler {
// meaningful to the operator itself.
class Operator : public ZoneObject {
public:
- Operator(uint8_t opcode, uint16_t properties)
- : opcode_(opcode), properties_(properties) {}
- virtual ~Operator() {}
+ typedef uint8_t Opcode;
// Properties inform the operator-independent optimizer about legal
// transformations for nodes that have this operator.
@@ -49,50 +45,61 @@ class Operator : public ZoneObject {
kEliminatable = kNoWrite | kNoThrow,
kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
};
+ typedef base::Flags<Property, uint8_t> Properties;
+
+ Operator(Opcode opcode, Properties properties, const char* mnemonic)
+ : opcode_(opcode), properties_(properties), mnemonic_(mnemonic) {}
+ virtual ~Operator();
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
// the opcode is stored directly in the operator object.
- inline uint8_t opcode() const { return opcode_; }
+ Opcode opcode() const { return opcode_; }
// Returns a constant string representing the mnemonic of the operator,
// without the static parameters. Useful for debugging.
- virtual const char* mnemonic() = 0;
+ const char* mnemonic() const { return mnemonic_; }
// Check if this operator equals another operator. Equivalent operators can
// be merged, and nodes with equivalent operators and equivalent inputs
// can be merged.
- virtual bool Equals(Operator* other) = 0;
+ virtual bool Equals(const Operator* other) const = 0;
// Compute a hashcode to speed up equivalence-set checking.
// Equal operators should always have equal hashcodes, and unequal operators
// should have unequal hashcodes with high probability.
- virtual int HashCode() = 0;
+ virtual int HashCode() const = 0;
// Check whether this operator has the given property.
- inline bool HasProperty(Property property) const {
- return (properties_ & static_cast<int>(property)) == property;
+ bool HasProperty(Property property) const {
+ return (properties() & property) == property;
}
// Number of data inputs to the operator, for verifying graph structure.
- virtual int InputCount() = 0;
+ virtual int InputCount() const = 0;
// Number of data outputs from the operator, for verifying graph structure.
- virtual int OutputCount() = 0;
+ virtual int OutputCount() const = 0;
- inline Property properties() { return static_cast<Property>(properties_); }
+ Properties properties() const { return properties_; }
// TODO(titzer): API for input and output types, for typechecking graph.
- private:
+ protected:
// Print the full operator into the given stream, including any
// static parameters. Useful for debugging and visualizing the IR.
virtual OStream& PrintTo(OStream& os) const = 0; // NOLINT
friend OStream& operator<<(OStream& os, const Operator& op);
- uint8_t opcode_;
- uint16_t properties_;
+ private:
+ Opcode opcode_;
+ Properties properties_;
+ const char* mnemonic_;
+
+ DISALLOW_COPY_AND_ASSIGN(Operator);
};
+DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
+
OStream& operator<<(OStream& os, const Operator& op);
// An implementation of Operator that has no static parameters. Such operators
@@ -100,27 +107,26 @@ OStream& operator<<(OStream& os, const Operator& op);
// They can represented by singletons and shared globally.
class SimpleOperator : public Operator {
public:
- SimpleOperator(uint8_t opcode, uint16_t properties, int input_count,
- int output_count, const char* mnemonic)
- : Operator(opcode, properties),
- input_count_(input_count),
- output_count_(output_count),
- mnemonic_(mnemonic) {}
+ SimpleOperator(Opcode opcode, Properties properties, int input_count,
+ int output_count, const char* mnemonic);
+ ~SimpleOperator();
- virtual const char* mnemonic() { return mnemonic_; }
- virtual bool Equals(Operator* that) { return opcode() == that->opcode(); }
- virtual int HashCode() { return opcode(); }
- virtual int InputCount() { return input_count_; }
- virtual int OutputCount() { return output_count_; }
+ virtual bool Equals(const Operator* that) const FINAL {
+ return opcode() == that->opcode();
+ }
+ virtual int HashCode() const FINAL { return opcode(); }
+ virtual int InputCount() const FINAL { return input_count_; }
+ virtual int OutputCount() const FINAL { return output_count_; }
private:
- virtual OStream& PrintTo(OStream& os) const { // NOLINT
- return os << mnemonic_;
+ virtual OStream& PrintTo(OStream& os) const FINAL { // NOLINT
+ return os << mnemonic();
}
int input_count_;
int output_count_;
- const char* mnemonic_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleOperator);
};
// Template specialization implements a kind of type class for dealing with the
@@ -136,25 +142,6 @@ struct StaticParameterTraits {
}
};
-template <>
-struct StaticParameterTraits<ExternalReference> {
- static OStream& PrintTo(OStream& os, ExternalReference val) { // NOLINT
- os << val.address();
- const Runtime::Function* function =
- Runtime::FunctionForEntry(val.address());
- if (function != NULL) {
- os << " <" << function->name << ".entry>";
- }
- return os;
- }
- static int HashCode(ExternalReference a) {
- return reinterpret_cast<intptr_t>(a.address()) & 0xFFFFFFFF;
- }
- static bool Equals(ExternalReference a, ExternalReference b) {
- return a == b;
- }
-};
-
// Specialization for static parameters of type {int}.
template <>
struct StaticParameterTraits<int> {
@@ -172,39 +159,33 @@ struct StaticParameterTraits<double> {
return os << val;
}
static int HashCode(double a) {
- return static_cast<int>(BitCast<int64_t>(a));
+ return static_cast<int>(bit_cast<int64_t>(a));
}
static bool Equals(double a, double b) {
- return BitCast<int64_t>(a) == BitCast<int64_t>(b);
+ return bit_cast<int64_t>(a) == bit_cast<int64_t>(b);
}
};
-// Specialization for static parameters of type {PrintableUnique<Object>}.
+// Specialization for static parameters of type {Unique<Object>}.
template <>
-struct StaticParameterTraits<PrintableUnique<Object> > {
- static OStream& PrintTo(OStream& os, PrintableUnique<Object> val) { // NOLINT
- return os << val.string();
+struct StaticParameterTraits<Unique<Object> > {
+ static OStream& PrintTo(OStream& os, Unique<Object> val) { // NOLINT
+ return os << Brief(*val.handle());
}
- static int HashCode(PrintableUnique<Object> a) {
+ static int HashCode(Unique<Object> a) {
return static_cast<int>(a.Hashcode());
}
- static bool Equals(PrintableUnique<Object> a, PrintableUnique<Object> b) {
- return a == b;
- }
+ static bool Equals(Unique<Object> a, Unique<Object> b) { return a == b; }
};
-// Specialization for static parameters of type {PrintableUnique<Name>}.
+// Specialization for static parameters of type {Unique<Name>}.
template <>
-struct StaticParameterTraits<PrintableUnique<Name> > {
- static OStream& PrintTo(OStream& os, PrintableUnique<Name> val) { // NOLINT
- return os << val.string();
- }
- static int HashCode(PrintableUnique<Name> a) {
- return static_cast<int>(a.Hashcode());
- }
- static bool Equals(PrintableUnique<Name> a, PrintableUnique<Name> b) {
- return a == b;
+struct StaticParameterTraits<Unique<Name> > {
+ static OStream& PrintTo(OStream& os, Unique<Name> val) { // NOLINT
+ return os << Brief(*val.handle());
}
+ static int HashCode(Unique<Name> a) { return static_cast<int>(a.Hashcode()); }
+ static bool Equals(Unique<Name> a, Unique<Name> b) { return a == b; }
};
#if DEBUG
@@ -213,15 +194,15 @@ struct StaticParameterTraits<PrintableUnique<Name> > {
template <>
struct StaticParameterTraits<Handle<Object> > {
static OStream& PrintTo(OStream& os, Handle<Object> val) { // NOLINT
- UNREACHABLE(); // Should use PrintableUnique<Object> instead
+ UNREACHABLE(); // Should use Unique<Object> instead
return os;
}
static int HashCode(Handle<Object> a) {
- UNREACHABLE(); // Should use PrintableUnique<Object> instead
+ UNREACHABLE(); // Should use Unique<Object> instead
return 0;
}
static bool Equals(Handle<Object> a, Handle<Object> b) {
- UNREACHABLE(); // Should use PrintableUnique<Object> instead
+ UNREACHABLE(); // Should use Unique<Object> instead
return false;
}
};
@@ -233,48 +214,49 @@ struct StaticParameterTraits<Handle<Object> > {
template <typename T>
class Operator1 : public Operator {
public:
- Operator1(uint8_t opcode, uint16_t properties, int input_count,
+ Operator1(Opcode opcode, Properties properties, int input_count,
int output_count, const char* mnemonic, T parameter)
- : Operator(opcode, properties),
+ : Operator(opcode, properties, mnemonic),
input_count_(input_count),
output_count_(output_count),
- mnemonic_(mnemonic),
parameter_(parameter) {}
const T& parameter() const { return parameter_; }
- virtual const char* mnemonic() { return mnemonic_; }
- virtual bool Equals(Operator* other) {
+ virtual bool Equals(const Operator* other) const OVERRIDE {
if (opcode() != other->opcode()) return false;
- Operator1<T>* that = static_cast<Operator1<T>*>(other);
- T temp1 = this->parameter_;
- T temp2 = that->parameter_;
- return StaticParameterTraits<T>::Equals(temp1, temp2);
+ const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
+ return StaticParameterTraits<T>::Equals(this->parameter_, that->parameter_);
}
- virtual int HashCode() {
+ virtual int HashCode() const OVERRIDE {
return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_);
}
- virtual int InputCount() { return input_count_; }
- virtual int OutputCount() { return output_count_; }
+ virtual int InputCount() const OVERRIDE { return input_count_; }
+ virtual int OutputCount() const OVERRIDE { return output_count_; }
virtual OStream& PrintParameter(OStream& os) const { // NOLINT
return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]";
}
- private:
- virtual OStream& PrintTo(OStream& os) const { // NOLINT
- return PrintParameter(os << mnemonic_);
+ protected:
+ virtual OStream& PrintTo(OStream& os) const FINAL { // NOLINT
+ return PrintParameter(os << mnemonic());
}
+ private:
int input_count_;
int output_count_;
- const char* mnemonic_;
T parameter_;
};
-// Type definitions for operators with specific types of parameters.
-typedef Operator1<PrintableUnique<Name> > NameOperator;
-}
+
+// Helper to extract parameters from Operator1<*> operator.
+template <typename T>
+static inline const T& OpParameter(const Operator* op) {
+ return reinterpret_cast<const Operator1<T>*>(op)->parameter();
}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_OPERATOR_H_
diff --git a/deps/v8/src/compiler/phi-reducer.h b/deps/v8/src/compiler/phi-reducer.h
index a9b1450431..5870d0433f 100644
--- a/deps/v8/src/compiler/phi-reducer.h
+++ b/deps/v8/src/compiler/phi-reducer.h
@@ -12,9 +12,9 @@ namespace internal {
namespace compiler {
// Replaces redundant phis if all the inputs are the same or the phi itself.
-class PhiReducer V8_FINAL : public Reducer {
+class PhiReducer FINAL : public Reducer {
public:
- virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+ virtual Reduction Reduce(Node* node) OVERRIDE {
if (node->opcode() != IrOpcode::kPhi &&
node->opcode() != IrOpcode::kEffectPhi)
return NoChange();
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index b0b3eb76ef..333382aaf3 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -6,6 +6,8 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-visualizer.h"
@@ -13,13 +15,17 @@
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-inlining.h"
#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/phi-reducer.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/typer.h"
+#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/hydrogen.h"
#include "src/ostreams.h"
@@ -73,23 +79,48 @@ class PhaseStats {
};
+static inline bool VerifyGraphs() {
+#ifdef DEBUG
+ return true;
+#else
+ return FLAG_turbo_verify;
+#endif
+}
+
+
void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
if (FLAG_trace_turbo) {
char buffer[256];
Vector<char> filename(buffer, sizeof(buffer));
- SmartArrayPointer<char> functionname =
- info_->shared_info()->DebugName()->ToCString();
- if (strlen(functionname.get()) > 0) {
- SNPrintF(filename, "turbo-%s-%s.dot", functionname.get(), phase);
+ if (!info_->shared_info().is_null()) {
+ SmartArrayPointer<char> functionname =
+ info_->shared_info()->DebugName()->ToCString();
+ if (strlen(functionname.get()) > 0) {
+ SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
+ } else {
+ SNPrintF(filename, "turbo-%p-%s", static_cast<void*>(info_), phase);
+ }
} else {
- SNPrintF(filename, "turbo-%p-%s.dot", static_cast<void*>(info_), phase);
+ SNPrintF(filename, "turbo-none-%s", phase);
}
std::replace(filename.start(), filename.start() + filename.length(), ' ',
'_');
- FILE* file = base::OS::FOpen(filename.start(), "w+");
- OFStream of(file);
- of << AsDOT(*graph);
- fclose(file);
+
+ char dot_buffer[256];
+ Vector<char> dot_filename(dot_buffer, sizeof(dot_buffer));
+ SNPrintF(dot_filename, "%s.dot", filename.start());
+ FILE* dot_file = base::OS::FOpen(dot_filename.start(), "w+");
+ OFStream dot_of(dot_file);
+ dot_of << AsDOT(*graph);
+ fclose(dot_file);
+
+ char json_buffer[256];
+ Vector<char> json_filename(json_buffer, sizeof(json_buffer));
+ SNPrintF(json_filename, "%s.json", filename.start());
+ FILE* json_file = base::OS::FOpen(json_filename.start(), "w+");
+ OFStream json_of(json_file);
+ json_of << AsJSON(*graph);
+ fclose(json_file);
OFStream os(stdout);
os << "-- " << phase << " graph printed to file " << filename.start()
@@ -112,7 +143,7 @@ class AstGraphBuilderWithPositions : public AstGraphBuilder {
}
#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) V8_OVERRIDE { \
+ virtual void Visit##type(type* node) OVERRIDE { \
SourcePositionTable::Scope pos(source_positions_, \
SourcePosition(node->position())); \
AstGraphBuilder::Visit##type(node); \
@@ -133,6 +164,17 @@ static void TraceSchedule(Schedule* schedule) {
Handle<Code> Pipeline::GenerateCode() {
+ if (info()->function()->dont_optimize_reason() == kTryCatchStatement ||
+ info()->function()->dont_optimize_reason() == kTryFinallyStatement ||
+ // TODO(turbofan): Make ES6 for-of work and remove this bailout.
+ info()->function()->dont_optimize_reason() == kForOfStatement ||
+ // TODO(turbofan): Make super work and remove this bailout.
+ info()->function()->dont_optimize_reason() == kSuperReference ||
+ // TODO(turbofan): Make OSR work and remove this bailout.
+ info()->is_osr()) {
+ return Handle<Code>::null();
+ }
+
if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_);
if (FLAG_trace_turbo) {
@@ -151,8 +193,10 @@ Handle<Code> Pipeline::GenerateCode() {
// construction. This is currently only needed for the node cache, which the
// typer could sweep over later.
Typer typer(zone());
+ MachineOperatorBuilder machine;
CommonOperatorBuilder common(zone());
- JSGraph jsgraph(&graph, &common, &typer);
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
Node* context_node;
{
PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH,
@@ -176,25 +220,37 @@ Handle<Code> Pipeline::GenerateCode() {
VerifyAndPrintGraph(&graph, "Initial untyped");
- if (FLAG_context_specialization) {
- SourcePositionTable::Scope pos_(&source_positions,
- SourcePosition::Unknown());
+ if (info()->is_context_specializing()) {
+ SourcePositionTable::Scope pos(&source_positions,
+ SourcePosition::Unknown());
// Specialize the code to the context as aggressively as possible.
JSContextSpecializer spec(info(), &jsgraph, context_node);
spec.SpecializeToContext();
VerifyAndPrintGraph(&graph, "Context specialized");
}
+ if (info()->is_inlining_enabled()) {
+ SourcePositionTable::Scope pos(&source_positions,
+ SourcePosition::Unknown());
+ JSInliner inliner(info(), &jsgraph);
+ inliner.Inline();
+ VerifyAndPrintGraph(&graph, "Inlined");
+ }
+
// Print a replay of the initial graph.
if (FLAG_print_turbo_replay) {
GraphReplayPrinter::PrintReplay(&graph);
}
- if (FLAG_turbo_types) {
+ // Bailout here in case target architecture is not supported.
+ if (!SupportedTarget()) return Handle<Code>::null();
+
+ if (info()->is_typing_enabled()) {
{
// Type the graph.
PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer");
typer.Run(&graph, info()->context());
+ VerifyAndPrintGraph(&graph, "Typed");
}
// All new nodes must be typed.
typer.DecorateGraph(&graph);
@@ -202,42 +258,80 @@ Handle<Code> Pipeline::GenerateCode() {
// Lower JSOperators where we can determine types.
PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
"typed lowering");
- JSTypedLowering lowering(&jsgraph, &source_positions);
- lowering.LowerAllNodes();
+ SourcePositionTable::Scope pos(&source_positions,
+ SourcePosition::Unknown());
+ JSTypedLowering lowering(&jsgraph);
+ GraphReducer graph_reducer(&graph);
+ graph_reducer.AddReducer(&lowering);
+ graph_reducer.ReduceGraph();
VerifyAndPrintGraph(&graph, "Lowered typed");
}
- }
-
- Handle<Code> code = Handle<Code>::null();
- if (SupportedTarget()) {
{
- // Lower any remaining generic JSOperators.
+ // Lower simplified operators and insert changes.
PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
- "generic lowering");
- MachineOperatorBuilder machine(zone());
- JSGenericLowering lowering(info(), &jsgraph, &machine, &source_positions);
+ "simplified lowering");
+ SourcePositionTable::Scope pos(&source_positions,
+ SourcePosition::Unknown());
+ SimplifiedLowering lowering(&jsgraph);
lowering.LowerAllNodes();
- VerifyAndPrintGraph(&graph, "Lowered generic");
+ VerifyAndPrintGraph(&graph, "Lowered simplified");
}
-
- // Compute a schedule.
- Schedule* schedule = ComputeSchedule(&graph);
- TraceSchedule(schedule);
-
{
- // Generate optimized code.
- PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
+ // Lower changes that have been inserted before.
+ PhaseStats lowering_stats(info(), PhaseStats::OPTIMIZATION,
+ "change lowering");
+ SourcePositionTable::Scope pos(&source_positions,
+ SourcePosition::Unknown());
Linkage linkage(info());
- code = GenerateCode(&linkage, &graph, schedule, &source_positions);
- info()->SetCode(code);
+ // TODO(turbofan): Value numbering disabled for now.
+ // ValueNumberingReducer vn_reducer(zone());
+ SimplifiedOperatorReducer simple_reducer(&jsgraph);
+ ChangeLowering lowering(&jsgraph, &linkage);
+ MachineOperatorReducer mach_reducer(&jsgraph);
+ GraphReducer graph_reducer(&graph);
+ // TODO(titzer): Figure out if we should run all reducers at once here.
+ // graph_reducer.AddReducer(&vn_reducer);
+ graph_reducer.AddReducer(&simple_reducer);
+ graph_reducer.AddReducer(&lowering);
+ graph_reducer.AddReducer(&mach_reducer);
+ graph_reducer.ReduceGraph();
+
+ VerifyAndPrintGraph(&graph, "Lowered changes");
}
+ }
+
+ {
+ // Lower any remaining generic JSOperators.
+ PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+ "generic lowering");
+ SourcePositionTable::Scope pos(&source_positions,
+ SourcePosition::Unknown());
+ JSGenericLowering lowering(info(), &jsgraph);
+ GraphReducer graph_reducer(&graph);
+ graph_reducer.AddReducer(&lowering);
+ graph_reducer.ReduceGraph();
- // Print optimized code.
- v8::internal::CodeGenerator::PrintCode(code, info());
+ VerifyAndPrintGraph(&graph, "Lowered generic");
}
+ source_positions.RemoveDecorator();
+
+ Handle<Code> code = Handle<Code>::null();
+ {
+ // Compute a schedule.
+ Schedule* schedule = ComputeSchedule(&graph);
+ // Generate optimized code.
+ PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
+ Linkage linkage(info());
+ code = GenerateCode(&linkage, &graph, schedule, &source_positions);
+ info()->SetCode(code);
+ }
+
+ // Print optimized code.
+ v8::internal::CodeGenerator::PrintCode(code, info());
+
if (FLAG_trace_turbo) {
OFStream os(stdout);
os << "--------------------------------------------------\n"
@@ -252,7 +346,10 @@ Handle<Code> Pipeline::GenerateCode() {
Schedule* Pipeline::ComputeSchedule(Graph* graph) {
PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling");
- return Scheduler::ComputeSchedule(graph);
+ Schedule* schedule = Scheduler::ComputeSchedule(graph);
+ TraceSchedule(schedule);
+ if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+ return schedule;
}
@@ -287,6 +384,11 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
DCHECK_NOT_NULL(schedule);
CHECK(SupportedBackend());
+ BasicBlockProfiler::Data* profiler_data = NULL;
+ if (FLAG_turbo_profiling) {
+ profiler_data = BasicBlockInstrumentor::Instrument(info_, graph, schedule);
+ }
+
InstructionSequence sequence(linkage, graph, schedule);
// Select and schedule instructions covering the scheduled graph.
@@ -305,12 +407,12 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
{
int node_count = graph->NodeCount();
if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
- linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
+ linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
return Handle<Code>::null();
}
RegisterAllocator allocator(&sequence);
if (!allocator.Allocate()) {
- linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
+ linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>::null();
}
}
@@ -323,7 +425,15 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
// Generate native sequence.
CodeGenerator generator(&sequence);
- return generator.GenerateCode();
+ Handle<Code> code = generator.GenerateCode();
+ if (profiler_data != NULL) {
+#if ENABLE_DISASSEMBLER
+ OStringStream os;
+ code->Disassemble(NULL, os);
+ profiler_data->SetCode(&os);
+#endif
+ }
+ return code;
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 4c1c0bcea9..9f8241a63c 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -17,7 +17,6 @@ namespace internal {
namespace compiler {
// Clients of this interface shouldn't depend on lots of compiler internals.
-class CallDescriptor;
class Graph;
class Schedule;
class SourcePositionTable;
@@ -35,27 +34,19 @@ class Pipeline {
Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
Schedule* schedule = NULL);
- CompilationInfo* info() const { return info_; }
- Zone* zone() { return info_->zone(); }
- Isolate* isolate() { return info_->isolate(); }
-
static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
- static inline bool VerifyGraphs() {
-#ifdef DEBUG
- return true;
-#else
- return FLAG_turbo_verify;
-#endif
- }
-
static void SetUp();
static void TearDown();
private:
CompilationInfo* info_;
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() { return info_->isolate(); }
+ Zone* zone() { return info_->zone(); }
+
Schedule* ComputeSchedule(Graph* graph);
void VerifyAndPrintGraph(Graph* graph, const char* phase);
Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index afbd268dcd..7f45eb941d 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/code-factory.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/scheduler.h"
@@ -10,23 +11,27 @@ namespace v8 {
namespace internal {
namespace compiler {
-RawMachineAssembler::RawMachineAssembler(
- Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
- MachineType word)
+RawMachineAssembler::RawMachineAssembler(Graph* graph,
+ MachineSignature* machine_sig,
+ MachineType word)
: GraphBuilder(graph),
schedule_(new (zone()) Schedule(zone())),
- machine_(zone(), word),
+ machine_(word),
common_(zone()),
- call_descriptor_builder_(call_descriptor_builder),
+ machine_sig_(machine_sig),
+ call_descriptor_(
+ Linkage::GetSimplifiedCDescriptor(graph->zone(), machine_sig)),
parameters_(NULL),
- exit_label_(schedule()->exit()),
- current_block_(schedule()->entry()) {
- Node* s = graph->NewNode(common_.Start(parameter_count()));
+ exit_label_(schedule()->end()),
+ current_block_(schedule()->start()) {
+ int param_count = static_cast<int>(parameter_count());
+ Node* s = graph->NewNode(common_.Start(param_count));
graph->SetStart(s);
if (parameter_count() == 0) return;
- parameters_ = zone()->NewArray<Node*>(parameter_count());
- for (int i = 0; i < parameter_count(); ++i) {
- parameters_[i] = NewNode(common()->Parameter(i), graph->start());
+ parameters_ = zone()->NewArray<Node*>(param_count);
+ for (size_t i = 0; i < parameter_count(); ++i) {
+ parameters_[i] =
+ NewNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
}
@@ -42,8 +47,8 @@ Schedule* RawMachineAssembler::Export() {
}
-Node* RawMachineAssembler::Parameter(int index) {
- DCHECK(0 <= index && index < parameter_count());
+Node* RawMachineAssembler::Parameter(size_t index) {
+ DCHECK(index < parameter_count());
return parameters_[index];
}
@@ -55,7 +60,7 @@ RawMachineAssembler::Label* RawMachineAssembler::Exit() {
void RawMachineAssembler::Goto(Label* label) {
- DCHECK(current_block_ != schedule()->exit());
+ DCHECK(current_block_ != schedule()->end());
schedule()->AddGoto(CurrentBlock(), Use(label));
current_block_ = NULL;
}
@@ -63,7 +68,7 @@ void RawMachineAssembler::Goto(Label* label) {
void RawMachineAssembler::Branch(Node* condition, Label* true_val,
Label* false_val) {
- DCHECK(current_block_ != schedule()->exit());
+ DCHECK(current_block_ != schedule()->end());
Node* branch = NewNode(common()->Branch(), condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
current_block_ = NULL;
@@ -76,42 +81,44 @@ void RawMachineAssembler::Return(Node* value) {
}
-void RawMachineAssembler::Deoptimize(Node* state) {
- Node* deopt = graph()->NewNode(common()->Deoptimize(), state);
- schedule()->AddDeoptimize(CurrentBlock(), deopt);
- current_block_ = NULL;
+Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
+ Node* context, Node* frame_state,
+ CallFunctionFlags flags) {
+ Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ callable.descriptor(), 1, CallDescriptor::kNeedsFrameState, zone());
+ Node* stub_code = HeapConstant(callable.code());
+ Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
+ receiver, context, frame_state);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
}
Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
- Label* continuation, Label* deoptimization) {
+ Node* context, Node* frame_state) {
CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone());
- Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver);
- schedule()->AddCall(CurrentBlock(), call, Use(continuation),
- Use(deoptimization));
- current_block_ = NULL;
+ Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
+ context, frame_state);
+ schedule()->AddNode(CurrentBlock(), call);
return call;
}
Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
- Node* arg0, Label* continuation,
- Label* deoptimization) {
- CallDescriptor* descriptor =
- Linkage::GetRuntimeCallDescriptor(function, 1, Operator::kNoProperties,
- CallDescriptor::kCanDeoptimize, zone());
+ Node* arg0, Node* context,
+ Node* frame_state) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ function, 1, Operator::kNoProperties, zone());
Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
Node* ref = NewNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
Node* arity = Int32Constant(1);
- Node* context = Parameter(1);
Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
- arity, context);
- schedule()->AddCall(CurrentBlock(), call, Use(continuation),
- Use(deoptimization));
- current_block_ = NULL;
+ arity, context, frame_state);
+ schedule()->AddNode(CurrentBlock(), call);
return call;
}
@@ -142,7 +149,7 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
}
-Node* RawMachineAssembler::MakeNode(Operator* op, int input_count,
+Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
Node** inputs) {
DCHECK(ScheduleValid());
DCHECK(current_block_ != NULL);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 6839ade4fe..846ff1c03d 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -9,7 +9,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-builder.h"
-#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
@@ -23,8 +23,7 @@ class BasicBlock;
class Schedule;
-class RawMachineAssembler : public GraphBuilder,
- public MachineNodeFactory<RawMachineAssembler> {
+class RawMachineAssembler : public GraphBuilder {
public:
class Label {
public:
@@ -45,56 +44,379 @@ class RawMachineAssembler : public GraphBuilder,
DISALLOW_COPY_AND_ASSIGN(Label);
};
- RawMachineAssembler(Graph* graph,
- MachineCallDescriptorBuilder* call_descriptor_builder,
- MachineType word = MachineOperatorBuilder::pointer_rep());
+ RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
+ MachineType word = kMachPtr);
virtual ~RawMachineAssembler() {}
Isolate* isolate() const { return zone()->isolate(); }
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
- CallDescriptor* call_descriptor() const {
- return call_descriptor_builder_->BuildCallDescriptor(zone());
+ CallDescriptor* call_descriptor() const { return call_descriptor_; }
+ size_t parameter_count() const { return machine_sig_->parameter_count(); }
+ MachineSignature* machine_sig() const { return machine_sig_; }
+
+ Node* UndefinedConstant() {
+ Unique<Object> unique = Unique<Object>::CreateImmovable(
+ isolate()->factory()->undefined_value());
+ return NewNode(common()->HeapConstant(unique));
+ }
+
+ // Constants.
+ Node* PointerConstant(void* value) {
+ return IntPtrConstant(reinterpret_cast<intptr_t>(value));
+ }
+ Node* IntPtrConstant(intptr_t value) {
+ // TODO(dcarney): mark generated code as unserializable if value != 0.
+ return kPointerSize == 8 ? Int64Constant(value)
+ : Int32Constant(static_cast<int>(value));
+ }
+ Node* Int32Constant(int32_t value) {
+ return NewNode(common()->Int32Constant(value));
+ }
+ Node* Int64Constant(int64_t value) {
+ return NewNode(common()->Int64Constant(value));
+ }
+ Node* NumberConstant(double value) {
+ return NewNode(common()->NumberConstant(value));
+ }
+ Node* Float32Constant(float value) {
+ return NewNode(common()->Float32Constant(value));
+ }
+ Node* Float64Constant(double value) {
+ return NewNode(common()->Float64Constant(value));
+ }
+ Node* HeapConstant(Handle<Object> object) {
+ Unique<Object> val = Unique<Object>::CreateUninitialized(object);
+ return NewNode(common()->HeapConstant(val));
+ }
+
+ Node* Projection(int index, Node* a) {
+ return NewNode(common()->Projection(index), a);
+ }
+
+ // Memory Operations.
+ Node* Load(MachineType rep, Node* base) {
+ return Load(rep, base, Int32Constant(0));
+ }
+ Node* Load(MachineType rep, Node* base, Node* index) {
+ return NewNode(machine()->Load(rep), base, index);
+ }
+ void Store(MachineType rep, Node* base, Node* value) {
+ Store(rep, base, Int32Constant(0), value);
+ }
+ void Store(MachineType rep, Node* base, Node* index, Node* value) {
+ NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
+ index, value);
+ }
+ // Arithmetic Operations.
+ Node* WordAnd(Node* a, Node* b) {
+ return NewNode(machine()->WordAnd(), a, b);
+ }
+ Node* WordOr(Node* a, Node* b) { return NewNode(machine()->WordOr(), a, b); }
+ Node* WordXor(Node* a, Node* b) {
+ return NewNode(machine()->WordXor(), a, b);
+ }
+ Node* WordShl(Node* a, Node* b) {
+ return NewNode(machine()->WordShl(), a, b);
+ }
+ Node* WordShr(Node* a, Node* b) {
+ return NewNode(machine()->WordShr(), a, b);
+ }
+ Node* WordSar(Node* a, Node* b) {
+ return NewNode(machine()->WordSar(), a, b);
+ }
+ Node* WordRor(Node* a, Node* b) {
+ return NewNode(machine()->WordRor(), a, b);
+ }
+ Node* WordEqual(Node* a, Node* b) {
+ return NewNode(machine()->WordEqual(), a, b);
+ }
+ Node* WordNotEqual(Node* a, Node* b) {
+ return WordBinaryNot(WordEqual(a, b));
+ }
+ Node* WordNot(Node* a) {
+ if (machine()->Is32()) {
+ return Word32Not(a);
+ } else {
+ return Word64Not(a);
+ }
+ }
+ Node* WordBinaryNot(Node* a) {
+ if (machine()->Is32()) {
+ return Word32BinaryNot(a);
+ } else {
+ return Word64BinaryNot(a);
+ }
+ }
+
+ Node* Word32And(Node* a, Node* b) {
+ return NewNode(machine()->Word32And(), a, b);
+ }
+ Node* Word32Or(Node* a, Node* b) {
+ return NewNode(machine()->Word32Or(), a, b);
+ }
+ Node* Word32Xor(Node* a, Node* b) {
+ return NewNode(machine()->Word32Xor(), a, b);
+ }
+ Node* Word32Shl(Node* a, Node* b) {
+ return NewNode(machine()->Word32Shl(), a, b);
+ }
+ Node* Word32Shr(Node* a, Node* b) {
+ return NewNode(machine()->Word32Shr(), a, b);
+ }
+ Node* Word32Sar(Node* a, Node* b) {
+ return NewNode(machine()->Word32Sar(), a, b);
+ }
+ Node* Word32Ror(Node* a, Node* b) {
+ return NewNode(machine()->Word32Ror(), a, b);
+ }
+ Node* Word32Equal(Node* a, Node* b) {
+ return NewNode(machine()->Word32Equal(), a, b);
+ }
+ Node* Word32NotEqual(Node* a, Node* b) {
+ return Word32BinaryNot(Word32Equal(a, b));
+ }
+ Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+ Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
+
+ Node* Word64And(Node* a, Node* b) {
+ return NewNode(machine()->Word64And(), a, b);
+ }
+ Node* Word64Or(Node* a, Node* b) {
+ return NewNode(machine()->Word64Or(), a, b);
+ }
+ Node* Word64Xor(Node* a, Node* b) {
+ return NewNode(machine()->Word64Xor(), a, b);
+ }
+ Node* Word64Shl(Node* a, Node* b) {
+ return NewNode(machine()->Word64Shl(), a, b);
+ }
+ Node* Word64Shr(Node* a, Node* b) {
+ return NewNode(machine()->Word64Shr(), a, b);
+ }
+ Node* Word64Sar(Node* a, Node* b) {
+ return NewNode(machine()->Word64Sar(), a, b);
+ }
+ Node* Word64Ror(Node* a, Node* b) {
+ return NewNode(machine()->Word64Ror(), a, b);
+ }
+ Node* Word64Equal(Node* a, Node* b) {
+ return NewNode(machine()->Word64Equal(), a, b);
+ }
+ Node* Word64NotEqual(Node* a, Node* b) {
+ return Word64BinaryNot(Word64Equal(a, b));
+ }
+ Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
+ Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
+
+ Node* Int32Add(Node* a, Node* b) {
+ return NewNode(machine()->Int32Add(), a, b);
+ }
+ Node* Int32AddWithOverflow(Node* a, Node* b) {
+ return NewNode(machine()->Int32AddWithOverflow(), a, b);
+ }
+ Node* Int32Sub(Node* a, Node* b) {
+ return NewNode(machine()->Int32Sub(), a, b);
+ }
+ Node* Int32SubWithOverflow(Node* a, Node* b) {
+ return NewNode(machine()->Int32SubWithOverflow(), a, b);
+ }
+ Node* Int32Mul(Node* a, Node* b) {
+ return NewNode(machine()->Int32Mul(), a, b);
}
- int parameter_count() const {
- return call_descriptor_builder_->parameter_count();
+ Node* Int32Div(Node* a, Node* b) {
+ return NewNode(machine()->Int32Div(), a, b);
}
- const MachineType* parameter_types() const {
- return call_descriptor_builder_->parameter_types();
+ Node* Int32UDiv(Node* a, Node* b) {
+ return NewNode(machine()->Int32UDiv(), a, b);
+ }
+ Node* Int32Mod(Node* a, Node* b) {
+ return NewNode(machine()->Int32Mod(), a, b);
+ }
+ Node* Int32UMod(Node* a, Node* b) {
+ return NewNode(machine()->Int32UMod(), a, b);
+ }
+ Node* Int32LessThan(Node* a, Node* b) {
+ return NewNode(machine()->Int32LessThan(), a, b);
+ }
+ Node* Int32LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Int32LessThanOrEqual(), a, b);
+ }
+ Node* Uint32LessThan(Node* a, Node* b) {
+ return NewNode(machine()->Uint32LessThan(), a, b);
+ }
+ Node* Uint32LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
+ }
+ Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
+ Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
+ return Int32LessThanOrEqual(b, a);
+ }
+ Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
+
+ Node* Int64Add(Node* a, Node* b) {
+ return NewNode(machine()->Int64Add(), a, b);
+ }
+ Node* Int64Sub(Node* a, Node* b) {
+ return NewNode(machine()->Int64Sub(), a, b);
+ }
+ Node* Int64Mul(Node* a, Node* b) {
+ return NewNode(machine()->Int64Mul(), a, b);
+ }
+ Node* Int64Div(Node* a, Node* b) {
+ return NewNode(machine()->Int64Div(), a, b);
+ }
+ Node* Int64UDiv(Node* a, Node* b) {
+ return NewNode(machine()->Int64UDiv(), a, b);
+ }
+ Node* Int64Mod(Node* a, Node* b) {
+ return NewNode(machine()->Int64Mod(), a, b);
+ }
+ Node* Int64UMod(Node* a, Node* b) {
+ return NewNode(machine()->Int64UMod(), a, b);
+ }
+ Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
+ Node* Int64LessThan(Node* a, Node* b) {
+ return NewNode(machine()->Int64LessThan(), a, b);
+ }
+ Node* Int64LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ }
+ Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
+ Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
+ return Int64LessThanOrEqual(b, a);
+ }
+
+ // TODO(turbofan): What is this used for?
+ Node* ConvertIntPtrToInt32(Node* a) {
+ return kPointerSize == 8 ? NewNode(machine()->TruncateInt64ToInt32(), a)
+ : a;
+ }
+ Node* ConvertInt32ToIntPtr(Node* a) {
+ return kPointerSize == 8 ? NewNode(machine()->ChangeInt32ToInt64(), a) : a;
+ }
+
+#define INTPTR_BINOP(prefix, name) \
+ Node* IntPtr##name(Node* a, Node* b) { \
+ return kPointerSize == 8 ? prefix##64##name(a, b) \
+ : prefix##32##name(a, b); \
+ }
+
+ INTPTR_BINOP(Int, Add);
+ INTPTR_BINOP(Int, Sub);
+ INTPTR_BINOP(Int, LessThan);
+ INTPTR_BINOP(Int, LessThanOrEqual);
+ INTPTR_BINOP(Word, Equal);
+ INTPTR_BINOP(Word, NotEqual);
+ INTPTR_BINOP(Int, GreaterThanOrEqual);
+ INTPTR_BINOP(Int, GreaterThan);
+
+#undef INTPTR_BINOP
+
+ Node* Float64Add(Node* a, Node* b) {
+ return NewNode(machine()->Float64Add(), a, b);
+ }
+ Node* Float64Sub(Node* a, Node* b) {
+ return NewNode(machine()->Float64Sub(), a, b);
+ }
+ Node* Float64Mul(Node* a, Node* b) {
+ return NewNode(machine()->Float64Mul(), a, b);
+ }
+ Node* Float64Div(Node* a, Node* b) {
+ return NewNode(machine()->Float64Div(), a, b);
+ }
+ Node* Float64Mod(Node* a, Node* b) {
+ return NewNode(machine()->Float64Mod(), a, b);
+ }
+ Node* Float64Equal(Node* a, Node* b) {
+ return NewNode(machine()->Float64Equal(), a, b);
+ }
+ Node* Float64NotEqual(Node* a, Node* b) {
+ return WordBinaryNot(Float64Equal(a, b));
+ }
+ Node* Float64LessThan(Node* a, Node* b) {
+ return NewNode(machine()->Float64LessThan(), a, b);
+ }
+ Node* Float64LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Float64LessThanOrEqual(), a, b);
+ }
+ Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
+ Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
+ return Float64LessThanOrEqual(b, a);
+ }
+
+ // Conversions.
+ Node* ChangeFloat32ToFloat64(Node* a) {
+ return NewNode(machine()->ChangeFloat32ToFloat64(), a);
+ }
+ Node* ChangeInt32ToFloat64(Node* a) {
+ return NewNode(machine()->ChangeInt32ToFloat64(), a);
+ }
+ Node* ChangeUint32ToFloat64(Node* a) {
+ return NewNode(machine()->ChangeUint32ToFloat64(), a);
+ }
+ Node* ChangeFloat64ToInt32(Node* a) {
+ return NewNode(machine()->ChangeFloat64ToInt32(), a);
+ }
+ Node* ChangeFloat64ToUint32(Node* a) {
+ return NewNode(machine()->ChangeFloat64ToUint32(), a);
+ }
+ Node* ChangeInt32ToInt64(Node* a) {
+ return NewNode(machine()->ChangeInt32ToInt64(), a);
+ }
+ Node* ChangeUint32ToUint64(Node* a) {
+ return NewNode(machine()->ChangeUint32ToUint64(), a);
+ }
+ Node* TruncateFloat64ToFloat32(Node* a) {
+ return NewNode(machine()->TruncateFloat64ToFloat32(), a);
+ }
+ Node* TruncateFloat64ToInt32(Node* a) {
+ return NewNode(machine()->TruncateFloat64ToInt32(), a);
+ }
+ Node* TruncateInt64ToInt32(Node* a) {
+ return NewNode(machine()->TruncateInt64ToInt32(), a);
}
// Parameters.
- Node* Parameter(int index);
+ Node* Parameter(size_t index);
// Control flow.
Label* Exit();
void Goto(Label* label);
void Branch(Node* condition, Label* true_val, Label* false_val);
+ // Call through CallFunctionStub with lazy deopt and frame-state.
+ Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
+ Node* frame_state, CallFunctionFlags flags);
// Call to a JS function with zero parameters.
- Node* CallJS0(Node* function, Node* receiver, Label* continuation,
- Label* deoptimization);
+ Node* CallJS0(Node* function, Node* receiver, Node* context,
+ Node* frame_state);
// Call to a runtime function with zero parameters.
- Node* CallRuntime1(Runtime::FunctionId function, Node* arg0,
- Label* continuation, Label* deoptimization);
+ Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
+ Node* frame_state);
void Return(Node* value);
void Bind(Label* label);
void Deoptimize(Node* state);
// Variables.
- Node* Phi(Node* n1, Node* n2) { return NewNode(common()->Phi(2), n1, n2); }
- Node* Phi(Node* n1, Node* n2, Node* n3) {
- return NewNode(common()->Phi(3), n1, n2, n3);
+ Node* Phi(MachineType type, Node* n1, Node* n2) {
+ return NewNode(common()->Phi(type, 2), n1, n2);
}
- Node* Phi(Node* n1, Node* n2, Node* n3, Node* n4) {
- return NewNode(common()->Phi(4), n1, n2, n3, n4);
+ Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
+ return NewNode(common()->Phi(type, 3), n1, n2, n3);
+ }
+ Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
+ return NewNode(common()->Phi(type, 4), n1, n2, n3, n4);
}
// MachineAssembler is invalid after export.
Schedule* Export();
protected:
- virtual Node* MakeNode(Operator* op, int input_count, Node** inputs);
+ virtual Node* MakeNode(const Operator* op, int input_count,
+ Node** inputs) FINAL;
+
+ bool ScheduleValid() { return schedule_ != NULL; }
Schedule* schedule() {
DCHECK(ScheduleValid());
@@ -102,19 +424,15 @@ class RawMachineAssembler : public GraphBuilder,
}
private:
- bool ScheduleValid() { return schedule_ != NULL; }
-
BasicBlock* Use(Label* label);
BasicBlock* EnsureBlock(Label* label);
BasicBlock* CurrentBlock();
- typedef std::vector<MachineType, zone_allocator<MachineType> >
- RepresentationVector;
-
Schedule* schedule_;
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
- MachineCallDescriptorBuilder* call_descriptor_builder_;
+ MachineSignature* machine_sig_;
+ CallDescriptor* call_descriptor_;
Node** parameters_;
Label exit_label_;
BasicBlock* current_block_;
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 972a904509..0dd358edb3 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -626,7 +626,7 @@ LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < DoubleRegister::NumAllocatableRegisters());
+ DCHECK(index < DoubleRegister::NumAllocatableAliasedRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
@@ -1016,7 +1016,8 @@ void RegisterAllocator::ProcessInstructions(BasicBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters();
+ ++i) {
if (!IsOutputDoubleRegisterOf(instr, i)) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
@@ -1110,7 +1111,7 @@ bool RegisterAllocator::Allocate() {
assigned_registers_ = new (code_zone())
BitVector(Register::NumAllocatableRegisters(), code_zone());
assigned_double_registers_ = new (code_zone())
- BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
+ BitVector(DoubleRegister::NumAllocatableAliasedRegisters(), code_zone());
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1514,7 +1515,7 @@ void RegisterAllocator::AllocateGeneralRegisters() {
void RegisterAllocator::AllocateDoubleRegisters() {
RegisterAllocatorPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::NumAllocatableRegisters();
+ num_registers_ = DoubleRegister::NumAllocatableAliasedRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1538,7 +1539,7 @@ void RegisterAllocator::AllocateRegisters() {
DCHECK(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index bd5fb5f793..f50a7ef1e3 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
#define V8_COMPILER_REPRESENTATION_CHANGE_H_
+#include "src/base/bits.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties-inl.h"
@@ -14,222 +15,256 @@ namespace v8 {
namespace internal {
namespace compiler {
-// The types and representations tracked during representation inference
-// and change insertion.
-// TODO(titzer): First, merge MachineType and RepType.
-// TODO(titzer): Second, Use the real type system instead of RepType.
-enum RepType {
- // Representations.
- rBit = 1 << 0,
- rWord32 = 1 << 1,
- rWord64 = 1 << 2,
- rFloat64 = 1 << 3,
- rTagged = 1 << 4,
-
- // Types.
- tBool = 1 << 5,
- tInt32 = 1 << 6,
- tUint32 = 1 << 7,
- tInt64 = 1 << 8,
- tUint64 = 1 << 9,
- tNumber = 1 << 10,
- tAny = 1 << 11
-};
-
-#define REP_TYPE_STRLEN 24
-
-typedef uint16_t RepTypeUnion;
-
-
-inline void RenderRepTypeUnion(char* buf, RepTypeUnion info) {
- base::OS::SNPrintF(buf, REP_TYPE_STRLEN, "{%s%s%s%s%s %s%s%s%s%s%s%s}",
- (info & rBit) ? "k" : " ", (info & rWord32) ? "w" : " ",
- (info & rWord64) ? "q" : " ",
- (info & rFloat64) ? "f" : " ",
- (info & rTagged) ? "t" : " ", (info & tBool) ? "Z" : " ",
- (info & tInt32) ? "I" : " ", (info & tUint32) ? "U" : " ",
- (info & tInt64) ? "L" : " ", (info & tUint64) ? "J" : " ",
- (info & tNumber) ? "N" : " ", (info & tAny) ? "*" : " ");
-}
-
-
-const RepTypeUnion rMask = rBit | rWord32 | rWord64 | rFloat64 | rTagged;
-const RepTypeUnion tMask =
- tBool | tInt32 | tUint32 | tInt64 | tUint64 | tNumber | tAny;
-const RepType rPtr = kPointerSize == 4 ? rWord32 : rWord64;
-
// Contains logic related to changing the representation of values for constants
// and other nodes, as well as lowering Simplified->Machine operators.
// Eagerly folds any representation changes for constants.
class RepresentationChanger {
public:
RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
- MachineOperatorBuilder* machine, Isolate* isolate)
+ Isolate* isolate)
: jsgraph_(jsgraph),
simplified_(simplified),
- machine_(machine),
isolate_(isolate),
testing_type_errors_(false),
type_error_(false) {}
+ // TODO(titzer): should Word64 also be implicitly convertable to others?
+ static const MachineTypeUnion rWord =
+ kRepBit | kRepWord8 | kRepWord16 | kRepWord32;
- Node* GetRepresentationFor(Node* node, RepTypeUnion output_type,
- RepTypeUnion use_type) {
- if (!IsPowerOf2(output_type & rMask)) {
+ Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
+ MachineTypeUnion use_type) {
+ if (!base::bits::IsPowerOfTwo32(output_type & kRepMask)) {
// There should be only one output representation.
return TypeError(node, output_type, use_type);
}
- if ((use_type & rMask) == (output_type & rMask)) {
+ if ((use_type & kRepMask) == (output_type & kRepMask)) {
// Representations are the same. That's a no-op.
return node;
}
- if (use_type & rTagged) {
+ if ((use_type & rWord) && (output_type & rWord)) {
+ // Both are words less than or equal to 32-bits.
+ // Since loads of integers from memory implicitly sign or zero extend the
+ // value to the full machine word size and stores implicitly truncate,
+ // no representation change is necessary.
+ return node;
+ }
+ if (use_type & kRepTagged) {
return GetTaggedRepresentationFor(node, output_type);
- } else if (use_type & rFloat64) {
+ } else if (use_type & kRepFloat32) {
+ return GetFloat32RepresentationFor(node, output_type);
+ } else if (use_type & kRepFloat64) {
return GetFloat64RepresentationFor(node, output_type);
- } else if (use_type & rWord32) {
- return GetWord32RepresentationFor(node, output_type, use_type & tUint32);
- } else if (use_type & rBit) {
+ } else if (use_type & kRepBit) {
return GetBitRepresentationFor(node, output_type);
- } else if (use_type & rWord64) {
+ } else if (use_type & rWord) {
+ return GetWord32RepresentationFor(node, output_type,
+ use_type & kTypeUint32);
+ } else if (use_type & kRepWord64) {
return GetWord64RepresentationFor(node, output_type);
} else {
return node;
}
}
- Node* GetTaggedRepresentationFor(Node* node, RepTypeUnion output_type) {
+ Node* GetTaggedRepresentationFor(Node* node, MachineTypeUnion output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kNumberConstant:
case IrOpcode::kHeapConstant:
return node; // No change necessary.
case IrOpcode::kInt32Constant:
- if (output_type & tUint32) {
- uint32_t value = ValueOf<uint32_t>(node->op());
+ if (output_type & kTypeUint32) {
+ uint32_t value = OpParameter<uint32_t>(node);
return jsgraph()->Constant(static_cast<double>(value));
- } else if (output_type & tInt32) {
- int32_t value = ValueOf<int32_t>(node->op());
+ } else if (output_type & kTypeInt32) {
+ int32_t value = OpParameter<int32_t>(node);
return jsgraph()->Constant(value);
- } else if (output_type & rBit) {
- return ValueOf<int32_t>(node->op()) == 0 ? jsgraph()->FalseConstant()
- : jsgraph()->TrueConstant();
+ } else if (output_type & kRepBit) {
+ return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
} else {
- return TypeError(node, output_type, rTagged);
+ return TypeError(node, output_type, kRepTagged);
}
case IrOpcode::kFloat64Constant:
- return jsgraph()->Constant(ValueOf<double>(node->op()));
+ return jsgraph()->Constant(OpParameter<double>(node));
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Constant(OpParameter<float>(node));
default:
break;
}
// Select the correct X -> Tagged operator.
- Operator* op;
- if (output_type & rBit) {
+ const Operator* op;
+ if (output_type & kRepBit) {
op = simplified()->ChangeBitToBool();
- } else if (output_type & rWord32) {
- if (output_type & tUint32) {
+ } else if (output_type & rWord) {
+ if (output_type & kTypeUint32) {
op = simplified()->ChangeUint32ToTagged();
- } else if (output_type & tInt32) {
+ } else if (output_type & kTypeInt32) {
op = simplified()->ChangeInt32ToTagged();
} else {
- return TypeError(node, output_type, rTagged);
+ return TypeError(node, output_type, kRepTagged);
}
- } else if (output_type & rFloat64) {
+ } else if (output_type & kRepFloat32) { // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTagged();
+ } else if (output_type & kRepFloat64) {
op = simplified()->ChangeFloat64ToTagged();
} else {
- return TypeError(node, output_type, rTagged);
+ return TypeError(node, output_type, kRepTagged);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+ }
+
+ Node* GetFloat32RepresentationFor(Node* node, MachineTypeUnion output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float32Constant(
+ DoubleToFloat32(OpParameter<double>(node)));
+ case IrOpcode::kInt32Constant:
+ if (output_type & kTypeUint32) {
+ uint32_t value = OpParameter<uint32_t>(node);
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ } else {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ }
+ case IrOpcode::kFloat32Constant:
+ return node; // No change necessary.
+ default:
+ break;
+ }
+ // Select the correct X -> Float32 operator.
+ const Operator* op;
+ if (output_type & kRepBit) {
+ return TypeError(node, output_type, kRepFloat32);
+ } else if (output_type & rWord) {
+ if (output_type & kTypeUint32) {
+ op = machine()->ChangeUint32ToFloat64();
+ } else {
+ op = machine()->ChangeInt32ToFloat64();
+ }
+ // int32 -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_type & kRepTagged) {
+ op = simplified()
+ ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_type & kRepFloat64) {
+ op = machine()->ChangeFloat32ToFloat64();
+ } else {
+ return TypeError(node, output_type, kRepFloat32);
}
return jsgraph()->graph()->NewNode(op, node);
}
- Node* GetFloat64RepresentationFor(Node* node, RepTypeUnion output_type) {
+ Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(ValueOf<double>(node->op()));
+ return jsgraph()->Float64Constant(OpParameter<double>(node));
case IrOpcode::kInt32Constant:
- if (output_type & tUint32) {
- uint32_t value = ValueOf<uint32_t>(node->op());
+ if (output_type & kTypeUint32) {
+ uint32_t value = OpParameter<uint32_t>(node);
return jsgraph()->Float64Constant(static_cast<double>(value));
} else {
- int32_t value = ValueOf<int32_t>(node->op());
+ int32_t value = OpParameter<int32_t>(node);
return jsgraph()->Float64Constant(value);
}
case IrOpcode::kFloat64Constant:
return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Float64Constant(OpParameter<float>(node));
default:
break;
}
// Select the correct X -> Float64 operator.
- Operator* op;
- if (output_type & rWord32) {
- if (output_type & tUint32) {
+ const Operator* op;
+ if (output_type & kRepBit) {
+ return TypeError(node, output_type, kRepFloat64);
+ } else if (output_type & rWord) {
+ if (output_type & kTypeUint32) {
op = machine()->ChangeUint32ToFloat64();
} else {
op = machine()->ChangeInt32ToFloat64();
}
- } else if (output_type & rTagged) {
+ } else if (output_type & kRepTagged) {
op = simplified()->ChangeTaggedToFloat64();
+ } else if (output_type & kRepFloat32) {
+ op = machine()->ChangeFloat32ToFloat64();
} else {
- return TypeError(node, output_type, rFloat64);
+ return TypeError(node, output_type, kRepFloat64);
}
return jsgraph()->graph()->NewNode(op, node);
}
- Node* GetWord32RepresentationFor(Node* node, RepTypeUnion output_type,
+ Node* MakeInt32Constant(double value) {
+ if (value < 0) {
+ DCHECK(IsInt32Double(value));
+ int32_t iv = static_cast<int32_t>(value);
+ return jsgraph()->Int32Constant(iv);
+ } else {
+ DCHECK(IsUint32Double(value));
+ int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
+ return jsgraph()->Int32Constant(iv);
+ }
+ }
+
+ Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
bool use_unsigned) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return MakeInt32Constant(OpParameter<float>(node));
case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant: {
- double value = ValueOf<double>(node->op());
- if (value < 0) {
- DCHECK(IsInt32Double(value));
- int32_t iv = static_cast<int32_t>(value);
- return jsgraph()->Int32Constant(iv);
- } else {
- DCHECK(IsUint32Double(value));
- int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
- return jsgraph()->Int32Constant(iv);
- }
- }
+ case IrOpcode::kFloat64Constant:
+ return MakeInt32Constant(OpParameter<double>(node));
default:
break;
}
// Select the correct X -> Word32 operator.
- Operator* op = NULL;
- if (output_type & rFloat64) {
- if (output_type & tUint32 || use_unsigned) {
+ const Operator* op = NULL;
+ if (output_type & kRepFloat64) {
+ if (output_type & kTypeUint32 || use_unsigned) {
op = machine()->ChangeFloat64ToUint32();
} else {
op = machine()->ChangeFloat64ToInt32();
}
- } else if (output_type & rTagged) {
- if (output_type & tUint32 || use_unsigned) {
+ } else if (output_type & kRepFloat32) {
+ node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
+ if (output_type & kTypeUint32 || use_unsigned) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else {
+ op = machine()->ChangeFloat64ToInt32();
+ }
+ } else if (output_type & kRepTagged) {
+ if (output_type & kTypeUint32 || use_unsigned) {
op = simplified()->ChangeTaggedToUint32();
} else {
op = simplified()->ChangeTaggedToInt32();
}
- } else if (output_type & rBit) {
- return node; // Sloppy comparison -> word32.
} else {
- return TypeError(node, output_type, rWord32);
+ return TypeError(node, output_type, kRepWord32);
}
return jsgraph()->graph()->NewNode(op, node);
}
- Node* GetBitRepresentationFor(Node* node, RepTypeUnion output_type) {
+ Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant: {
- int32_t value = ValueOf<int32_t>(node->op());
+ int32_t value = OpParameter<int32_t>(node);
if (value == 0 || value == 1) return node;
return jsgraph()->OneConstant(); // value != 0
}
case IrOpcode::kHeapConstant: {
- Handle<Object> handle = ValueOf<Handle<Object> >(node->op());
+ Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
DCHECK(*handle == isolate()->heap()->true_value() ||
*handle == isolate()->heap()->false_value());
return jsgraph()->Int32Constant(
@@ -239,54 +274,39 @@ class RepresentationChanger {
break;
}
// Select the correct X -> Bit operator.
- Operator* op;
- if (output_type & rWord32) {
+ const Operator* op;
+ if (output_type & rWord) {
return node; // No change necessary.
- } else if (output_type & rWord64) {
+ } else if (output_type & kRepWord64) {
return node; // TODO(titzer): No change necessary, on 64-bit.
- } else if (output_type & rTagged) {
+ } else if (output_type & kRepTagged) {
op = simplified()->ChangeBoolToBit();
} else {
- return TypeError(node, output_type, rBit);
+ return TypeError(node, output_type, kRepBit);
}
return jsgraph()->graph()->NewNode(op, node);
}
- Node* GetWord64RepresentationFor(Node* node, RepTypeUnion output_type) {
- if (output_type & rBit) {
+ Node* GetWord64RepresentationFor(Node* node, MachineTypeUnion output_type) {
+ if (output_type & kRepBit) {
return node; // Sloppy comparison -> word64
}
// Can't really convert Word64 to anything else. Purported to be internal.
- return TypeError(node, output_type, rWord64);
- }
-
- static RepType TypeForMachineType(MachineType rep) {
- // TODO(titzer): merge MachineType and RepType.
- switch (rep) {
- case kMachineWord8:
- return rWord32;
- case kMachineWord16:
- return rWord32;
- case kMachineWord32:
- return rWord32;
- case kMachineWord64:
- return rWord64;
- case kMachineFloat64:
- return rFloat64;
- case kMachineTagged:
- return rTagged;
- default:
- UNREACHABLE();
- return static_cast<RepType>(0);
- }
+ return TypeError(node, output_type, kRepWord64);
}
- Operator* Int32OperatorFor(IrOpcode::Value opcode) {
+ const Operator* Int32OperatorFor(IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kNumberAdd:
return machine()->Int32Add();
case IrOpcode::kNumberSubtract:
return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Int32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Int32Mod();
case IrOpcode::kNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
@@ -299,12 +319,18 @@ class RepresentationChanger {
}
}
- Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
+ const Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kNumberAdd:
return machine()->Int32Add();
case IrOpcode::kNumberSubtract:
return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Int32UDiv();
+ case IrOpcode::kNumberModulus:
+ return machine()->Int32UMod();
case IrOpcode::kNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
@@ -317,7 +343,7 @@ class RepresentationChanger {
}
}
- Operator* Float64OperatorFor(IrOpcode::Value opcode) {
+ const Operator* Float64OperatorFor(IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kNumberAdd:
return machine()->Float64Add();
@@ -341,42 +367,27 @@ class RepresentationChanger {
}
}
- RepType TypeForField(const FieldAccess& access) {
- RepType tElement = static_cast<RepType>(0); // TODO(titzer)
- RepType rElement = TypeForMachineType(access.representation);
- return static_cast<RepType>(tElement | rElement);
- }
-
- RepType TypeForElement(const ElementAccess& access) {
- RepType tElement = static_cast<RepType>(0); // TODO(titzer)
- RepType rElement = TypeForMachineType(access.representation);
- return static_cast<RepType>(tElement | rElement);
- }
-
- RepType TypeForBasePointer(const FieldAccess& access) {
- if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged);
- return kPointerSize == 8 ? rWord64 : rWord32;
+ MachineType TypeForBasePointer(const FieldAccess& access) {
+ return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
}
- RepType TypeForBasePointer(const ElementAccess& access) {
- if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged);
- return kPointerSize == 8 ? rWord64 : rWord32;
+ MachineType TypeForBasePointer(const ElementAccess& access) {
+ return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
}
- RepType TypeFromUpperBound(Type* type) {
+ MachineType TypeFromUpperBound(Type* type) {
if (type->Is(Type::None()))
- return tAny; // TODO(titzer): should be an error
- if (type->Is(Type::Signed32())) return tInt32;
- if (type->Is(Type::Unsigned32())) return tUint32;
- if (type->Is(Type::Number())) return tNumber;
- if (type->Is(Type::Boolean())) return tBool;
- return tAny;
+ return kTypeAny; // TODO(titzer): should be an error
+ if (type->Is(Type::Signed32())) return kTypeInt32;
+ if (type->Is(Type::Unsigned32())) return kTypeUint32;
+ if (type->Is(Type::Number())) return kTypeNumber;
+ if (type->Is(Type::Boolean())) return kTypeBool;
+ return kTypeAny;
}
private:
JSGraph* jsgraph_;
SimplifiedOperatorBuilder* simplified_;
- MachineOperatorBuilder* machine_;
Isolate* isolate_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -384,28 +395,38 @@ class RepresentationChanger {
bool testing_type_errors_; // If {true}, don't abort on a type error.
bool type_error_; // Set when a type error is detected.
- Node* TypeError(Node* node, RepTypeUnion output_type, RepTypeUnion use) {
+ Node* TypeError(Node* node, MachineTypeUnion output_type,
+ MachineTypeUnion use) {
type_error_ = true;
if (!testing_type_errors_) {
- char buf1[REP_TYPE_STRLEN];
- char buf2[REP_TYPE_STRLEN];
- RenderRepTypeUnion(buf1, output_type);
- RenderRepTypeUnion(buf2, use);
+ OStringStream out_str;
+ out_str << static_cast<MachineType>(output_type);
+
+ OStringStream use_str;
+ use_str << static_cast<MachineType>(use);
+
V8_Fatal(__FILE__, __LINE__,
- "RepresentationChangerError: node #%d:%s of rep"
- "%s cannot be changed to rep%s",
- node->id(), node->op()->mnemonic(), buf1, buf2);
+ "RepresentationChangerError: node #%d:%s of "
+ "%s cannot be changed to %s",
+ node->id(), node->op()->mnemonic(), out_str.c_str(),
+ use_str.c_str());
}
return node;
}
+ Node* InsertChangeFloat32ToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(),
+ node);
+ }
+
JSGraph* jsgraph() { return jsgraph_; }
Isolate* isolate() { return isolate_; }
SimplifiedOperatorBuilder* simplified() { return simplified_; }
- MachineOperatorBuilder* machine() { return machine_; }
+ MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_REPRESENTATION_CHANGE_H_
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 64766765bf..a3b5ed383b 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -24,10 +24,6 @@ OStream& operator<<(OStream& os, const BasicBlockData::Control& c) {
return os << "return";
case BasicBlockData::kThrow:
return os << "throw";
- case BasicBlockData::kCall:
- return os << "call";
- case BasicBlockData::kDeoptimize:
- return os << "deoptimize";
}
UNREACHABLE();
return os;
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index e730f3324c..0094d57525 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -22,6 +22,7 @@ namespace internal {
namespace compiler {
class BasicBlock;
+class BasicBlockInstrumentor;
class Graph;
class ConstructScheduleData;
class CodeGenerator; // Because of a namespace bug in clang.
@@ -30,16 +31,15 @@ class BasicBlockData {
public:
// Possible control nodes that can end a block.
enum Control {
- kNone, // Control not initialized yet.
- kGoto, // Goto a single successor block.
- kBranch, // Branch if true to first successor, otherwise second.
- kReturn, // Return a value from this method.
- kThrow, // Throw an exception.
- kCall, // Call to a possibly deoptimizing or throwing function.
- kDeoptimize // Deoptimize.
+ kNone, // Control not initialized yet.
+ kGoto, // Goto a single successor block.
+ kBranch, // Branch if true to first successor, otherwise second.
+ kReturn, // Return a value from this method.
+ kThrow // Throw an exception.
};
int32_t rpo_number_; // special RPO number of the block.
+ BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
// NULL if none. For loop headers, this points to
// enclosing loop header.
@@ -55,6 +55,7 @@ class BasicBlockData {
explicit BasicBlockData(Zone* zone)
: rpo_number_(-1),
+ dominator_(NULL),
loop_header_(NULL),
loop_depth_(0),
loop_end_(-1),
@@ -63,7 +64,7 @@ class BasicBlockData {
deferred_(false),
control_(kNone),
control_input_(NULL),
- nodes_(NodeVector::allocator_type(zone)) {}
+ nodes_(zone) {}
inline bool IsLoopHeader() const { return loop_end_ >= 0; }
inline bool LoopContains(BasicBlockData* block) const {
@@ -92,7 +93,7 @@ OStream& operator<<(OStream& os, const BasicBlockData::Control& c);
// A basic block contains an ordered list of nodes and ends with a control
// node. Note that if a basic block has phis, then all phis must appear as the
// first nodes in the block.
-class BasicBlock V8_FINAL : public GenericNode<BasicBlockData, BasicBlock> {
+class BasicBlock FINAL : public GenericNode<BasicBlockData, BasicBlock> {
public:
BasicBlock(GenericGraphBase* graph, int input_count)
: GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {}
@@ -145,8 +146,7 @@ class BasicBlock V8_FINAL : public GenericNode<BasicBlockData, BasicBlock> {
typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock>
NullBasicBlockVisitor;
-typedef zone_allocator<BasicBlock*> BasicBlockPtrZoneAllocator;
-typedef std::vector<BasicBlock*, BasicBlockPtrZoneAllocator> BasicBlockVector;
+typedef ZoneVector<BasicBlock*> BasicBlockVector;
typedef BasicBlockVector::iterator BasicBlockVectorIter;
typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
@@ -156,23 +156,17 @@ typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
// by the graph's dependencies. A schedule is required to generate code.
class Schedule : public GenericGraph<BasicBlock> {
public:
- explicit Schedule(Zone* zone)
+ explicit Schedule(Zone* zone, size_t node_count_hint = 0)
: GenericGraph<BasicBlock>(zone),
zone_(zone),
- all_blocks_(BasicBlockVector::allocator_type(zone)),
- nodeid_to_block_(BasicBlockVector::allocator_type(zone)),
- rpo_order_(BasicBlockVector::allocator_type(zone)),
- immediate_dominator_(BasicBlockVector::allocator_type(zone)) {
- NewBasicBlock(); // entry.
- NewBasicBlock(); // exit.
- SetStart(entry());
- SetEnd(exit());
+ all_blocks_(zone),
+ nodeid_to_block_(zone),
+ rpo_order_(zone) {
+ SetStart(NewBasicBlock()); // entry.
+ SetEnd(NewBasicBlock()); // exit.
+ nodeid_to_block_.reserve(node_count_hint);
}
- // TODO(titzer): rewrite users of these methods to use start() and end().
- BasicBlock* entry() const { return all_blocks_[0]; } // Return entry block.
- BasicBlock* exit() const { return all_blocks_[1]; } // Return exit block.
-
// Return the block which contains {node}, if any.
BasicBlock* block(Node* node) const {
if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
@@ -181,10 +175,6 @@ class Schedule : public GenericGraph<BasicBlock> {
return NULL;
}
- BasicBlock* dominator(BasicBlock* block) {
- return immediate_dominator_[block->id()];
- }
-
bool IsScheduled(Node* node) {
int length = static_cast<int>(nodeid_to_block_.size());
if (node->id() >= length) return false;
@@ -219,8 +209,8 @@ class Schedule : public GenericGraph<BasicBlock> {
// doesn't actually add the node to the block.
inline void PlanNode(BasicBlock* block, Node* node) {
if (FLAG_trace_turbo_scheduler) {
- PrintF("Planning node %d for future add to block %d\n", node->id(),
- block->id());
+ PrintF("Planning #%d:%s for future add to B%d\n", node->id(),
+ node->op()->mnemonic(), block->id());
}
DCHECK(this->block(node) == NULL);
SetBlockForNode(block, node);
@@ -229,7 +219,8 @@ class Schedule : public GenericGraph<BasicBlock> {
// BasicBlock building: add a node to the end of the block.
inline void AddNode(BasicBlock* block, Node* node) {
if (FLAG_trace_turbo_scheduler) {
- PrintF("Adding node %d to block %d\n", node->id(), block->id());
+ PrintF("Adding #%d:%s to B%d\n", node->id(), node->op()->mnemonic(),
+ block->id());
}
DCHECK(this->block(node) == NULL || this->block(node) == block);
block->nodes_.push_back(node);
@@ -243,19 +234,6 @@ class Schedule : public GenericGraph<BasicBlock> {
AddSuccessor(block, succ);
}
- // BasicBlock building: add a (branching) call at the end of {block}.
- void AddCall(BasicBlock* block, Node* call, BasicBlock* cont_block,
- BasicBlock* deopt_block) {
- DCHECK(block->control_ == BasicBlock::kNone);
- DCHECK(call->opcode() == IrOpcode::kCall);
- block->control_ = BasicBlock::kCall;
- // Insert the deopt block first so that the RPO order builder picks
- // it first (and thus it ends up late in the RPO order).
- AddSuccessor(block, deopt_block);
- AddSuccessor(block, cont_block);
- SetControlInput(block, call);
- }
-
// BasicBlock building: add a branch at the end of {block}.
void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock) {
@@ -265,15 +243,22 @@ class Schedule : public GenericGraph<BasicBlock> {
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
SetControlInput(block, branch);
+ if (branch->opcode() == IrOpcode::kBranch) {
+ // TODO(titzer): require a Branch node here. (sloppy tests).
+ SetBlockForNode(block, branch);
+ }
}
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input) {
- // TODO(titzer): require a Return node here.
DCHECK(block->control_ == BasicBlock::kNone);
block->control_ = BasicBlock::kReturn;
SetControlInput(block, input);
- if (block != exit()) AddSuccessor(block, exit());
+ if (block != end()) AddSuccessor(block, end());
+ if (input->opcode() == IrOpcode::kReturn) {
+ // TODO(titzer): require a Return node here. (sloppy tests).
+ SetBlockForNode(block, input);
+ }
}
// BasicBlock building: add a throw at the end of {block}.
@@ -281,16 +266,7 @@ class Schedule : public GenericGraph<BasicBlock> {
DCHECK(block->control_ == BasicBlock::kNone);
block->control_ = BasicBlock::kThrow;
SetControlInput(block, input);
- if (block != exit()) AddSuccessor(block, exit());
- }
-
- // BasicBlock building: add a deopt at the end of {block}.
- void AddDeoptimize(BasicBlock* block, Node* state) {
- DCHECK(block->control_ == BasicBlock::kNone);
- block->control_ = BasicBlock::kDeoptimize;
- SetControlInput(block, state);
- block->deferred_ = true; // By default, consider deopts the slow path.
- if (block != exit()) AddSuccessor(block, exit());
+ if (block != end()) AddSuccessor(block, end());
}
friend class Scheduler;
@@ -304,6 +280,7 @@ class Schedule : public GenericGraph<BasicBlock> {
private:
friend class ScheduleVisualizer;
+ friend class BasicBlockInstrumentor;
void SetControlInput(BasicBlock* block, Node* node) {
block->control_input_ = node;
@@ -322,9 +299,6 @@ class Schedule : public GenericGraph<BasicBlock> {
BasicBlockVector all_blocks_; // All basic blocks in the schedule.
BasicBlockVector nodeid_to_block_; // Map from node to containing block.
BasicBlockVector rpo_order_; // Reverse-post-order block list.
- BasicBlockVector immediate_dominator_; // Maps to a block's immediate
- // dominator, indexed by block
- // id.
};
OStream& operator<<(OStream& os, const Schedule& s);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 6a40091698..58878a0776 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <deque>
+#include <queue>
+
#include "src/compiler/scheduler.h"
#include "src/compiler/graph.h"
@@ -15,274 +18,290 @@ namespace v8 {
namespace internal {
namespace compiler {
-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
- : graph_(graph),
- schedule_(schedule),
- branches_(NodeVector::allocator_type(zone)),
- calls_(NodeVector::allocator_type(zone)),
- deopts_(NodeVector::allocator_type(zone)),
- returns_(NodeVector::allocator_type(zone)),
- loops_and_merges_(NodeVector::allocator_type(zone)),
- node_block_placement_(BasicBlockVector::allocator_type(zone)),
- unscheduled_uses_(IntVector::allocator_type(zone)),
- scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
- schedule_root_nodes_(NodeVector::allocator_type(zone)),
- schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
-
-
-Schedule* Scheduler::ComputeSchedule(Graph* graph) {
- Zone tmp_zone(graph->zone()->isolate());
- Schedule* schedule = new (graph->zone()) Schedule(graph->zone());
- Scheduler scheduler(&tmp_zone, graph, schedule);
-
- schedule->AddNode(schedule->end(), graph->end());
+static inline void Trace(const char* msg, ...) {
+ if (FLAG_trace_turbo_scheduler) {
+ va_list arguments;
+ va_start(arguments, msg);
+ base::OS::VPrint(msg, arguments);
+ va_end(arguments);
+ }
+}
- scheduler.PrepareAuxiliaryNodeData();
- scheduler.CreateBlocks();
- scheduler.WireBlocks();
- scheduler.PrepareAuxiliaryBlockData();
- Scheduler::ComputeSpecialRPO(schedule);
- scheduler.GenerateImmediateDominatorTree();
+// Internal class to build a control flow graph (i.e the basic blocks and edges
+// between them within a Schedule) from the node graph.
+// Visits the control edges of the graph backwards from end in order to find
+// the connected control subgraph, needed for scheduling.
+class CFGBuilder {
+ public:
+ Scheduler* scheduler_;
+ Schedule* schedule_;
+ ZoneQueue<Node*> queue_;
+ NodeVector control_;
+
+ CFGBuilder(Zone* zone, Scheduler* scheduler)
+ : scheduler_(scheduler),
+ schedule_(scheduler->schedule_),
+ queue_(zone),
+ control_(zone) {}
+
+ // Run the control flow graph construction algorithm by walking the graph
+ // backwards from end through control edges, building and connecting the
+ // basic blocks for control nodes.
+ void Run() {
+ Graph* graph = scheduler_->graph_;
+ FixNode(schedule_->start(), graph->start());
+ Queue(graph->end());
+
+ while (!queue_.empty()) { // Breadth-first backwards traversal.
+ Node* node = queue_.front();
+ queue_.pop();
+ int max = NodeProperties::PastControlIndex(node);
+ for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+ Queue(node->InputAt(i));
+ }
+ }
- scheduler.PrepareUses();
- scheduler.ScheduleEarly();
- scheduler.ScheduleLate();
+ for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+ ConnectBlocks(*i); // Connect block to its predecessor/successors.
+ }
- return schedule;
-}
+ FixNode(schedule_->end(), graph->end());
+ }
+ void FixNode(BasicBlock* block, Node* node) {
+ schedule_->AddNode(block, node);
+ scheduler_->GetData(node)->is_connected_control_ = true;
+ scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+ }
-class CreateBlockVisitor : public NullNodeVisitor {
- public:
- explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {}
+ void Queue(Node* node) {
+ // Mark the connected control nodes as they queued.
+ Scheduler::SchedulerData* data = scheduler_->GetData(node);
+ if (!data->is_connected_control_) {
+ BuildBlocks(node);
+ queue_.push(node);
+ control_.push_back(node);
+ data->is_connected_control_ = true;
+ }
+ }
- GenericGraphVisit::Control Post(Node* node) {
- Schedule* schedule = scheduler_->schedule_;
+ void BuildBlocks(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kContinuation:
- case IrOpcode::kLazyDeoptimization: {
- BasicBlock* block = schedule->NewBasicBlock();
- schedule->AddNode(block, node);
- break;
- }
case IrOpcode::kLoop:
- case IrOpcode::kMerge: {
- BasicBlock* block = schedule->NewBasicBlock();
- schedule->AddNode(block, node);
- scheduler_->loops_and_merges_.push_back(node);
+ case IrOpcode::kMerge:
+ BuildBlockForNode(node);
break;
- }
- case IrOpcode::kBranch: {
- scheduler_->branches_.push_back(node);
+ case IrOpcode::kBranch:
+ BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
break;
- }
- case IrOpcode::kDeoptimize: {
- scheduler_->deopts_.push_back(node);
+ default:
break;
- }
- case IrOpcode::kCall: {
- if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
- scheduler_->calls_.push_back(node);
- }
+ }
+ }
+
+ void ConnectBlocks(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ ConnectMerge(node);
+ break;
+ case IrOpcode::kBranch:
+ scheduler_->schedule_root_nodes_.push_back(node);
+ ConnectBranch(node);
break;
- }
case IrOpcode::kReturn:
- scheduler_->returns_.push_back(node);
+ scheduler_->schedule_root_nodes_.push_back(node);
+ ConnectReturn(node);
break;
default:
break;
}
-
- return GenericGraphVisit::CONTINUE;
}
- private:
- Scheduler* scheduler_;
-};
-
-
-void Scheduler::CreateBlocks() {
- CreateBlockVisitor create_blocks(this);
- if (FLAG_trace_turbo_scheduler) {
- PrintF("---------------- CREATING BLOCKS ------------------\n");
+ void BuildBlockForNode(Node* node) {
+ if (schedule_->block(node) == NULL) {
+ BasicBlock* block = schedule_->NewBasicBlock();
+ Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
+ node->op()->mnemonic());
+ FixNode(block, node);
+ }
}
- schedule_->AddNode(schedule_->entry(), graph_->start());
- graph_->VisitNodeInputsFromEnd(&create_blocks);
-}
+ void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
+ IrOpcode::Value b) {
+ Node* successors[2];
+ CollectSuccessorProjections(node, successors, a, b);
+ BuildBlockForNode(successors[0]);
+ BuildBlockForNode(successors[1]);
+ }
-void Scheduler::WireBlocks() {
- if (FLAG_trace_turbo_scheduler) {
- PrintF("----------------- WIRING BLOCKS -------------------\n");
+ // Collect the branch-related projections from a node, such as IfTrue,
+ // IfFalse.
+ // TODO(titzer): consider moving this to node.h
+ void CollectSuccessorProjections(Node* node, Node** buffer,
+ IrOpcode::Value true_opcode,
+ IrOpcode::Value false_opcode) {
+ buffer[0] = NULL;
+ buffer[1] = NULL;
+ for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+ if ((*i)->opcode() == true_opcode) {
+ DCHECK_EQ(NULL, buffer[0]);
+ buffer[0] = *i;
+ }
+ if ((*i)->opcode() == false_opcode) {
+ DCHECK_EQ(NULL, buffer[1]);
+ buffer[1] = *i;
+ }
+ }
+ DCHECK_NE(NULL, buffer[0]);
+ DCHECK_NE(NULL, buffer[1]);
}
- AddSuccessorsForBranches();
- AddSuccessorsForReturns();
- AddSuccessorsForCalls();
- AddSuccessorsForDeopts();
- AddPredecessorsForLoopsAndMerges();
- // TODO(danno): Handle Throw, et al.
-}
+ void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
+ IrOpcode::Value true_opcode,
+ IrOpcode::Value false_opcode) {
+ Node* successors[2];
+ CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
+ buffer[0] = schedule_->block(successors[0]);
+ buffer[1] = schedule_->block(successors[1]);
+ }
-void Scheduler::PrepareAuxiliaryNodeData() {
- unscheduled_uses_.resize(graph_->NodeCount(), 0);
- schedule_early_rpo_index_.resize(graph_->NodeCount(), 0);
-}
+ void ConnectBranch(Node* branch) {
+ Node* branch_block_node = NodeProperties::GetControlInput(branch);
+ BasicBlock* branch_block = schedule_->block(branch_block_node);
+ DCHECK(branch_block != NULL);
+ BasicBlock* successor_blocks[2];
+ CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
+ IrOpcode::kIfFalse);
-void Scheduler::PrepareAuxiliaryBlockData() {
- Zone* zone = schedule_->zone();
- scheduled_nodes_.resize(schedule_->BasicBlockCount(),
- NodeVector(NodeVector::allocator_type(zone)));
- schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
-}
+ TraceConnect(branch, branch_block, successor_blocks[0]);
+ TraceConnect(branch, branch_block, successor_blocks[1]);
+ schedule_->AddBranch(branch_block, branch, successor_blocks[0],
+ successor_blocks[1]);
+ }
-void Scheduler::AddPredecessorsForLoopsAndMerges() {
- for (NodeVectorIter i = loops_and_merges_.begin();
- i != loops_and_merges_.end(); ++i) {
- Node* merge_or_loop = *i;
- BasicBlock* block = schedule_->block(merge_or_loop);
+ void ConnectMerge(Node* merge) {
+ BasicBlock* block = schedule_->block(merge);
DCHECK(block != NULL);
// For all of the merge's control inputs, add a goto at the end to the
// merge's basic block.
- for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) {
- if (OperatorProperties::IsBasicBlockBegin((*i)->op())) {
- BasicBlock* predecessor_block = schedule_->block(*j);
- if ((*j)->opcode() != IrOpcode::kReturn &&
- (*j)->opcode() != IrOpcode::kDeoptimize) {
- DCHECK(predecessor_block != NULL);
- if (FLAG_trace_turbo_scheduler) {
- IrOpcode::Value opcode = (*i)->opcode();
- PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(),
- IrOpcode::Mnemonic(opcode), predecessor_block->id(),
- block->id());
- }
- schedule_->AddGoto(predecessor_block, block);
- }
+ for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
+ ++j) {
+ BasicBlock* predecessor_block = schedule_->block(*j);
+ if ((*j)->opcode() != IrOpcode::kReturn) {
+ TraceConnect(merge, predecessor_block, block);
+ schedule_->AddGoto(predecessor_block, block);
}
}
}
-}
+ void ConnectReturn(Node* ret) {
+ Node* return_block_node = NodeProperties::GetControlInput(ret);
+ BasicBlock* return_block = schedule_->block(return_block_node);
+ TraceConnect(ret, return_block, NULL);
+ schedule_->AddReturn(return_block, ret);
+ }
-void Scheduler::AddSuccessorsForCalls() {
- for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) {
- Node* call = *i;
- DCHECK(call->opcode() == IrOpcode::kCall);
- DCHECK(OperatorProperties::CanLazilyDeoptimize(call->op()));
-
- Node* lazy_deopt_node = NULL;
- Node* cont_node = NULL;
- // Find the continuation and lazy-deopt nodes among the uses.
- for (UseIter use_iter = call->uses().begin();
- use_iter != call->uses().end(); ++use_iter) {
- switch ((*use_iter)->opcode()) {
- case IrOpcode::kContinuation: {
- DCHECK(cont_node == NULL);
- cont_node = *use_iter;
- break;
- }
- case IrOpcode::kLazyDeoptimization: {
- DCHECK(lazy_deopt_node == NULL);
- lazy_deopt_node = *use_iter;
- break;
- }
- default:
- break;
- }
- }
- DCHECK(lazy_deopt_node != NULL);
- DCHECK(cont_node != NULL);
- BasicBlock* cont_successor_block = schedule_->block(cont_node);
- BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node);
- Node* call_block_node = NodeProperties::GetControlInput(call);
- BasicBlock* call_block = schedule_->block(call_block_node);
- if (FLAG_trace_turbo_scheduler) {
- IrOpcode::Value opcode = call->opcode();
- PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
- IrOpcode::Mnemonic(opcode), call_block->id(),
- cont_successor_block->id());
- PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
- IrOpcode::Mnemonic(opcode), call_block->id(),
- deopt_successor_block->id());
+ void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
+ DCHECK_NE(NULL, block);
+ if (succ == NULL) {
+ Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
+ block->id());
+ } else {
+ Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
+ block->id(), succ->id());
}
- schedule_->AddCall(call_block, call, cont_successor_block,
- deopt_successor_block);
}
+};
+
+
+Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
+ SchedulerData def = {0, 0, false, false, kUnknown};
+ return def;
}
-void Scheduler::AddSuccessorsForDeopts() {
- for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) {
- Node* deopt_block_node = NodeProperties::GetControlInput(*i);
- BasicBlock* deopt_block = schedule_->block(deopt_block_node);
- DCHECK(deopt_block != NULL);
- if (FLAG_trace_turbo_scheduler) {
- IrOpcode::Value opcode = (*i)->opcode();
- PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
- IrOpcode::Mnemonic(opcode), deopt_block->id());
- }
- schedule_->AddDeoptimize(deopt_block, *i);
- }
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+ : zone_(zone),
+ graph_(graph),
+ schedule_(schedule),
+ scheduled_nodes_(zone),
+ schedule_root_nodes_(zone),
+ node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
+ has_floating_control_(false) {}
+
+
+Schedule* Scheduler::ComputeSchedule(Graph* graph) {
+ Schedule* schedule;
+ bool had_floating_control = false;
+ do {
+ Zone tmp_zone(graph->zone()->isolate());
+ schedule = new (graph->zone())
+ Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
+ Scheduler scheduler(&tmp_zone, graph, schedule);
+
+ scheduler.BuildCFG();
+
+ Scheduler::ComputeSpecialRPO(schedule);
+ scheduler.GenerateImmediateDominatorTree();
+
+ scheduler.PrepareUses();
+ scheduler.ScheduleEarly();
+ scheduler.ScheduleLate();
+
+ had_floating_control = scheduler.ConnectFloatingControl();
+ } while (had_floating_control);
+
+ return schedule;
}
-void Scheduler::AddSuccessorsForBranches() {
- for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) {
- Node* branch = *i;
- DCHECK(branch->opcode() == IrOpcode::kBranch);
- Node* branch_block_node = NodeProperties::GetControlInput(branch);
- BasicBlock* branch_block = schedule_->block(branch_block_node);
- DCHECK(branch_block != NULL);
- UseIter use_iter = branch->uses().begin();
- Node* first_successor = *use_iter;
- ++use_iter;
- DCHECK(use_iter != branch->uses().end());
- Node* second_successor = *use_iter;
- DCHECK(++use_iter == branch->uses().end());
- Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
- ? first_successor
- : second_successor;
- Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
- ? second_successor
- : first_successor;
- DCHECK(true_successor_node->opcode() == IrOpcode::kIfTrue);
- DCHECK(false_successor_node->opcode() == IrOpcode::kIfFalse);
- BasicBlock* true_successor_block = schedule_->block(true_successor_node);
- BasicBlock* false_successor_block = schedule_->block(false_successor_node);
- DCHECK(true_successor_block != NULL);
- DCHECK(false_successor_block != NULL);
- if (FLAG_trace_turbo_scheduler) {
- IrOpcode::Value opcode = branch->opcode();
- PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
- IrOpcode::Mnemonic(opcode), branch_block->id(),
- true_successor_block->id());
- PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
- IrOpcode::Mnemonic(opcode), branch_block->id(),
- false_successor_block->id());
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+ SchedulerData* data = GetData(node);
+ if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
+ switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ // Parameters are always fixed to the start node.
+ data->placement_ = kFixed;
+ break;
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi: {
+ // Phis and effect phis are fixed if their control inputs are.
+ data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+ break;
+ }
+#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
+ CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
+#undef DEFINE_FLOATING_CONTROL_CASE
+ {
+ // Control nodes that were not control-reachable from end may float.
+ data->placement_ = kSchedulable;
+ if (!data->is_connected_control_) {
+ data->is_floating_control_ = true;
+ has_floating_control_ = true;
+ Trace("Floating control found: #%d:%s\n", node->id(),
+ node->op()->mnemonic());
+ }
+ break;
+ }
+ default:
+ data->placement_ = kSchedulable;
+ break;
}
- schedule_->AddBranch(branch_block, branch, true_successor_block,
- false_successor_block);
}
+ return data->placement_;
}
-void Scheduler::AddSuccessorsForReturns() {
- for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) {
- Node* return_block_node = NodeProperties::GetControlInput(*i);
- BasicBlock* return_block = schedule_->block(return_block_node);
- DCHECK(return_block != NULL);
- if (FLAG_trace_turbo_scheduler) {
- IrOpcode::Value opcode = (*i)->opcode();
- PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
- IrOpcode::Mnemonic(opcode), return_block->id());
- }
- schedule_->AddReturn(return_block, *i);
- }
+void Scheduler::BuildCFG() {
+ Trace("---------------- CREATING CFG ------------------\n");
+ CFGBuilder cfg_builder(zone_, this);
+ cfg_builder.Run();
+ // Initialize per-block data.
+ scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
}
@@ -292,9 +311,9 @@ BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
int b2_rpo = GetRPONumber(b2);
DCHECK(b1_rpo != b2_rpo);
if (b1_rpo < b2_rpo) {
- b2 = schedule_->immediate_dominator_[b2->id()];
+ b2 = b2->dominator_;
} else {
- b1 = schedule_->immediate_dominator_[b1->id()];
+ b1 = b1->dominator_;
}
}
return b1;
@@ -304,12 +323,10 @@ BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
void Scheduler::GenerateImmediateDominatorTree() {
// Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
// if this becomes really slow.
- if (FLAG_trace_turbo_scheduler) {
- PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
- }
+ Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
BasicBlock* current_rpo = schedule_->rpo_order_[i];
- if (current_rpo != schedule_->entry()) {
+ if (current_rpo != schedule_->start()) {
BasicBlock::Predecessors::iterator current_pred =
current_rpo->predecessors().begin();
BasicBlock::Predecessors::iterator end =
@@ -328,10 +345,8 @@ void Scheduler::GenerateImmediateDominatorTree() {
}
++current_pred;
}
- schedule_->immediate_dominator_[current_rpo->id()] = dominator;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
- }
+ current_rpo->dominator_ = dominator;
+ Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
}
}
}
@@ -345,53 +360,43 @@ class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
schedule_(scheduler->schedule_) {}
GenericGraphVisit::Control Pre(Node* node) {
- int id = node->id();
int max_rpo = 0;
// Fixed nodes already know their schedule early position.
- if (IsFixedNode(node)) {
+ if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
BasicBlock* block = schedule_->block(node);
DCHECK(block != NULL);
max_rpo = block->rpo_number_;
- if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+ if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
has_changed_rpo_constraints_ = true;
}
- scheduler_->schedule_early_rpo_index_[id] = max_rpo;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
- }
+ scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+ Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
+ node->op()->mnemonic(), max_rpo);
}
return GenericGraphVisit::CONTINUE;
}
GenericGraphVisit::Control Post(Node* node) {
- int id = node->id();
int max_rpo = 0;
// Otherwise, the minimum rpo for the node is the max of all of the inputs.
- if (!IsFixedNode(node)) {
- DCHECK(!OperatorProperties::IsBasicBlockBegin(node->op()));
+ if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
for (InputIter i = node->inputs().begin(); i != node->inputs().end();
++i) {
- int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
+ int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
if (control_rpo > max_rpo) {
max_rpo = control_rpo;
}
}
- if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+ if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
has_changed_rpo_constraints_ = true;
}
- scheduler_->schedule_early_rpo_index_[id] = max_rpo;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
- }
+ scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+ Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
+ node->op()->mnemonic(), max_rpo);
}
return GenericGraphVisit::CONTINUE;
}
- static bool IsFixedNode(Node* node) {
- return OperatorProperties::HasFixedSchedulePosition(node->op()) ||
- !OperatorProperties::CanBeScheduled(node->op());
- }
-
// TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
// rewritten to use a pre-order traversal from the start instead.
bool has_changed_rpo_constraints_;
@@ -403,9 +408,7 @@ class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
void Scheduler::ScheduleEarly() {
- if (FLAG_trace_turbo_scheduler) {
- PrintF("------------------- SCHEDULE EARLY ----------------\n");
- }
+ Trace("------------------- SCHEDULE EARLY ----------------\n");
int fixpoint_count = 0;
ScheduleEarlyNodeVisitor visitor(this);
@@ -415,9 +418,7 @@ void Scheduler::ScheduleEarly() {
fixpoint_count++;
}
- if (FLAG_trace_turbo_scheduler) {
- PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count);
- }
+ Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
}
@@ -427,26 +428,21 @@ class PrepareUsesVisitor : public NullNodeVisitor {
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
GenericGraphVisit::Control Pre(Node* node) {
- // Some nodes must be scheduled explicitly to ensure they are in exactly the
- // right place; it's a convenient place during the preparation of use counts
- // to schedule them.
- if (!schedule_->IsScheduled(node) &&
- OperatorProperties::HasFixedSchedulePosition(node->op())) {
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Fixed position node %d is unscheduled, scheduling now\n",
- node->id());
- }
- IrOpcode::Value opcode = node->opcode();
- BasicBlock* block =
- opcode == IrOpcode::kParameter
- ? schedule_->entry()
- : schedule_->block(NodeProperties::GetControlInput(node));
- DCHECK(block != NULL);
- schedule_->AddNode(block, node);
- }
-
- if (OperatorProperties::IsScheduleRoot(node->op())) {
+ if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+ // Fixed nodes are always roots for schedule late.
scheduler_->schedule_root_nodes_.push_back(node);
+ if (!schedule_->IsScheduled(node)) {
+ // Make sure root nodes are scheduled in their respective blocks.
+ Trace(" Scheduling fixed position node #%d:%s\n", node->id(),
+ node->op()->mnemonic());
+ IrOpcode::Value opcode = node->opcode();
+ BasicBlock* block =
+ opcode == IrOpcode::kParameter
+ ? schedule_->start()
+ : schedule_->block(NodeProperties::GetControlInput(node));
+ DCHECK(block != NULL);
+ schedule_->AddNode(block, node);
+ }
}
return GenericGraphVisit::CONTINUE;
@@ -456,14 +452,12 @@ class PrepareUsesVisitor : public NullNodeVisitor {
// If the edge is from an unscheduled node, then tally it in the use count
// for all of its inputs. The same criterion will be used in ScheduleLate
// for decrementing use counts.
- if (!schedule_->IsScheduled(from) &&
- OperatorProperties::CanBeScheduled(from->op())) {
- DCHECK(!OperatorProperties::HasFixedSchedulePosition(from->op()));
- ++scheduler_->unscheduled_uses_[to->id()];
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Incrementing uses of node %d from %d to %d\n", to->id(),
- from->id(), scheduler_->unscheduled_uses_[to->id()]);
- }
+ if (!schedule_->IsScheduled(from)) {
+ DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
+ ++(scheduler_->GetData(to)->unscheduled_count_);
+ Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
+ to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+ scheduler_->GetData(to)->unscheduled_count_);
}
}
@@ -474,9 +468,7 @@ class PrepareUsesVisitor : public NullNodeVisitor {
void Scheduler::PrepareUses() {
- if (FLAG_trace_turbo_scheduler) {
- PrintF("------------------- PREPARE USES ------------------\n");
- }
+ Trace("------------------- PREPARE USES ------------------\n");
// Count the uses of every node, it will be used to ensure that all of a
// node's uses are scheduled before the node itself.
PrepareUsesVisitor prepare_uses(this);
@@ -490,20 +482,18 @@ class ScheduleLateNodeVisitor : public NullNodeVisitor {
: scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
GenericGraphVisit::Control Pre(Node* node) {
- // Don't schedule nodes that cannot be scheduled or are already scheduled.
- if (!OperatorProperties::CanBeScheduled(node->op()) ||
- schedule_->IsScheduled(node)) {
+ // Don't schedule nodes that are already scheduled.
+ if (schedule_->IsScheduled(node)) {
return GenericGraphVisit::CONTINUE;
}
- DCHECK(!OperatorProperties::HasFixedSchedulePosition(node->op()));
+ Scheduler::SchedulerData* data = scheduler_->GetData(node);
+ DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
// If all the uses of a node have been scheduled, then the node itself can
// be scheduled.
- bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(),
- eligible ? "true" : "false");
- }
+ bool eligible = data->unscheduled_count_ == 0;
+ Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
+ node->op()->mnemonic(), eligible ? "true" : "false");
if (!eligible) return GenericGraphVisit::DEFER;
// Determine the dominating block for all of the uses of this node. It is
@@ -519,36 +509,31 @@ class ScheduleLateNodeVisitor : public NullNodeVisitor {
}
DCHECK(block != NULL);
- int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
- if (FLAG_trace_turbo_scheduler) {
- PrintF(
- "Schedule late conservative for node %d is block %d at "
- "loop depth %d, min rpo = %d\n",
- node->id(), block->id(), block->loop_depth_, min_rpo);
- }
+ int min_rpo = data->minimum_rpo_;
+ Trace(
+ "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
+ "minimum_rpo = %d\n",
+ node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
+ min_rpo);
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
- // into enlcosing loop pre-headers until they would preceed their
+ // into enclosing loop pre-headers until they would precede their
// ScheduleEarly position.
BasicBlock* hoist_block = block;
while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
if (hoist_block->loop_depth_ < block->loop_depth_) {
block = hoist_block;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Hoisting node %d to block %d\n", node->id(), block->id());
- }
+ Trace(" hoisting #%d:%s to block %d\n", node->id(),
+ node->op()->mnemonic(), block->id());
}
// Try to hoist to the pre-header of the loop header.
hoist_block = hoist_block->loop_header();
if (hoist_block != NULL) {
- BasicBlock* pre_header = schedule_->dominator(hoist_block);
+ BasicBlock* pre_header = hoist_block->dominator_;
DCHECK(pre_header == NULL ||
*hoist_block->predecessors().begin() == pre_header);
- if (FLAG_trace_turbo_scheduler) {
- PrintF(
- "Try hoist to pre-header block %d of loop header block %d,"
- " depth would be %d\n",
- pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
- }
+ Trace(
+ " hoist to pre-header B%d of loop header B%d, depth would be %d\n",
+ pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
hoist_block = pre_header;
}
}
@@ -562,46 +547,43 @@ class ScheduleLateNodeVisitor : public NullNodeVisitor {
BasicBlock* GetBlockForUse(Node::Edge edge) {
Node* use = edge.from();
IrOpcode::Value opcode = use->opcode();
- // If the use is a phi, forward through the the phi to the basic block
- // corresponding to the phi's input.
if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ // If the use is from a fixed (i.e. non-floating) phi, use the block
+ // of the corresponding control input to the merge.
int index = edge.index();
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Use %d is input %d to a phi\n", use->id(), index);
+ if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+ Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(),
+ use->op()->mnemonic());
+ Node* merge = NodeProperties::GetControlInput(use, 0);
+ opcode = merge->opcode();
+ DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+ use = NodeProperties::GetControlInput(merge, index);
}
- use = NodeProperties::GetControlInput(use, 0);
- opcode = use->opcode();
- DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
- use = NodeProperties::GetControlInput(use, index);
}
BasicBlock* result = schedule_->block(use);
if (result == NULL) return NULL;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Must dominate use %d in block %d\n", use->id(), result->id());
- }
+ Trace(" must dominate use #%d:%s in B%d\n", use->id(),
+ use->op()->mnemonic(), result->id());
return result;
}
- bool IsNodeEligible(Node* node) {
- bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
- return eligible;
- }
-
void ScheduleNode(BasicBlock* block, Node* node) {
schedule_->PlanNode(block, node);
scheduler_->scheduled_nodes_[block->id()].push_back(node);
// Reduce the use count of the node's inputs to potentially make them
- // scheduable.
+ // schedulable.
for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
- DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
- --scheduler_->unscheduled_uses_[(*i)->id()];
+ Scheduler::SchedulerData* data = scheduler_->GetData(*i);
+ DCHECK(data->unscheduled_count_ > 0);
+ --data->unscheduled_count_;
if (FLAG_trace_turbo_scheduler) {
- PrintF("Decrementing use count for node %d from node %d (now %d)\n",
- (*i)->id(), i.edge().from()->id(),
- scheduler_->unscheduled_uses_[(*i)->id()]);
- if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
- PrintF("node %d is now eligible for scheduling\n", (*i)->id());
+ Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
+ (*i)->op()->mnemonic(), i.edge().from()->id(),
+ i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
+ if (data->unscheduled_count_ == 0) {
+ Trace(" newly eligible #%d:%s\n", (*i)->id(),
+ (*i)->op()->mnemonic());
}
}
}
@@ -613,18 +595,25 @@ class ScheduleLateNodeVisitor : public NullNodeVisitor {
void Scheduler::ScheduleLate() {
+ Trace("------------------- SCHEDULE LATE -----------------\n");
if (FLAG_trace_turbo_scheduler) {
- PrintF("------------------- SCHEDULE LATE -----------------\n");
+ Trace("roots: ");
+ for (NodeVectorIter i = schedule_root_nodes_.begin();
+ i != schedule_root_nodes_.end(); ++i) {
+ Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+ }
+ Trace("\n");
}
// Schedule: Places nodes in dominator block of all their uses.
ScheduleLateNodeVisitor schedule_late_visitor(this);
- for (NodeVectorIter i = schedule_root_nodes_.begin();
- i != schedule_root_nodes_.end(); ++i) {
+ {
+ Zone zone(zone_->isolate());
GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
NodeInputIterationTraits<Node> >(
- graph_, *i, &schedule_late_visitor);
+ graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
+ &schedule_late_visitor);
}
// Add collected nodes for basic blocks to their blocks in the right order.
@@ -639,6 +628,102 @@ void Scheduler::ScheduleLate() {
}
+bool Scheduler::ConnectFloatingControl() {
+ if (!has_floating_control_) return false;
+
+ Trace("Connecting floating control...\n");
+
+ // Process blocks and instructions backwards to find and connect floating
+ // control nodes into the control graph according to the block they were
+ // scheduled into.
+ int max = static_cast<int>(schedule_->rpo_order()->size());
+ for (int i = max - 1; i >= 0; i--) {
+ BasicBlock* block = schedule_->rpo_order()->at(i);
+ // TODO(titzer): we place at most one floating control structure per
+ // basic block because scheduling currently can interleave phis from
+ // one subgraph with the merges from another subgraph.
+ bool one_placed = false;
+ for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
+ Node* node = block->nodes_[j];
+ SchedulerData* data = GetData(node);
+ if (data->is_floating_control_ && !data->is_connected_control_ &&
+ !one_placed) {
+ Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(),
+ node->op()->mnemonic(), block->id());
+ ConnectFloatingControlSubgraph(block, node);
+ one_placed = true;
+ }
+ }
+ }
+
+ return true;
+}
+
+
+void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
+ Node* block_start = block->nodes_[0];
+ DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
+ // Find the current "control successor" of the node that starts the block
+ // by searching the control uses for a control input edge from a connected
+ // control node.
+ Node* control_succ = NULL;
+ for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
+ ++i) {
+ Node::Edge edge = i.edge();
+ if (NodeProperties::IsControlEdge(edge) &&
+ GetData(edge.from())->is_connected_control_) {
+ DCHECK_EQ(NULL, control_succ);
+ control_succ = edge.from();
+ control_succ->ReplaceInput(edge.index(), end);
+ }
+ }
+ DCHECK_NE(NULL, control_succ);
+ Trace(" Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
+ end->id(), end->op()->mnemonic(), control_succ->id(),
+ control_succ->op()->mnemonic(), block_start->id(),
+ block_start->op()->mnemonic());
+
+ // Find the "start" node of the control subgraph, which should be the
+ // unique node that is itself floating control but has a control input that
+ // is not floating.
+ Node* start = NULL;
+ ZoneQueue<Node*> queue(zone_);
+ queue.push(end);
+ GetData(end)->is_connected_control_ = true;
+ while (!queue.empty()) {
+ Node* node = queue.front();
+ queue.pop();
+ Trace(" Search #%d:%s for control subgraph start\n", node->id(),
+ node->op()->mnemonic());
+ int max = NodeProperties::PastControlIndex(node);
+ for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+ Node* input = node->InputAt(i);
+ SchedulerData* data = GetData(input);
+ if (data->is_floating_control_) {
+ // {input} is floating control.
+ if (!data->is_connected_control_) {
+ // First time seeing {input} during this traversal, queue it.
+ queue.push(input);
+ data->is_connected_control_ = true;
+ }
+ } else {
+ // Otherwise, {node} is the start node, because it is floating control
+ // but is connected to {input} that is not floating control.
+ DCHECK_EQ(NULL, start); // There can be only one.
+ start = node;
+ }
+ }
+ }
+
+ DCHECK_NE(NULL, start);
+ start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
+
+ Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(),
+ start->op()->mnemonic(), block_start->id(),
+ block_start->op()->mnemonic());
+}
+
+
// Numbering for BasicBlockData.rpo_number_ for this block traversal:
static const int kBlockOnStack = -2;
static const int kBlockVisited1 = -3;
@@ -848,11 +933,9 @@ static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
Zone tmp_zone(schedule->zone()->isolate());
Zone* zone = &tmp_zone;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("------------- COMPUTING SPECIAL RPO ---------------\n");
- }
+ Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
// RPO should not have been computed for this schedule yet.
- CHECK_EQ(kBlockUnvisited1, schedule->entry()->rpo_number_);
+ CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
// Perform an iterative RPO traversal using an explicit stack,
@@ -860,7 +943,7 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
SpecialRPOStackFrame* stack =
zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
- BasicBlock* entry = schedule->entry();
+ BasicBlock* entry = schedule->start();
BlockList* order = NULL;
int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
int num_loops = 0;
@@ -1010,10 +1093,8 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
: end->block->rpo_number_;
current_header = current_loop->header;
- if (FLAG_trace_turbo_scheduler) {
- PrintF("Block %d is a loop header, increment loop depth to %d\n",
- current->id(), loop_depth);
- }
+ Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
+ loop_depth);
} else {
while (current_header != NULL &&
current->rpo_number_ >= current_header->loop_end_) {
@@ -1025,15 +1106,12 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
}
}
current->loop_depth_ = loop_depth;
- if (FLAG_trace_turbo_scheduler) {
- if (current->loop_header_ == NULL) {
- PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(),
- current->loop_depth_);
- } else {
- PrintF("Block %d's loop header is block %d, loop depth %d\n",
- current->id(), current->loop_header_->id(),
- current->loop_depth_);
- }
+ if (current->loop_header_ == NULL) {
+ Trace("B%d is not in a loop (depth == %d)\n", current->id(),
+ current->loop_depth_);
+ } else {
+ Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
+ current->loop_header_->id(), current->loop_depth_);
}
}
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index db620edb55..b21662f60c 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -5,13 +5,10 @@
#ifndef V8_COMPILER_SCHEDULER_H_
#define V8_COMPILER_SCHEDULER_H_
-#include <vector>
-
#include "src/v8.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
-#include "src/zone-allocator.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -22,28 +19,52 @@ namespace compiler {
// ordering the basic blocks in the special RPO order.
class Scheduler {
public:
- // Create a new schedule and place all computations from the graph in it.
+ // The complete scheduling algorithm.
+ // Create a new schedule and place all nodes from the graph into it.
static Schedule* ComputeSchedule(Graph* graph);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule);
+ // (Exposed for testing only)
+ // Build and connect the CFG for a node graph, but don't schedule nodes.
+ static void ComputeCFG(Graph* graph, Schedule* schedule);
+
private:
+ enum Placement { kUnknown, kSchedulable, kFixed };
+
+ // Per-node data tracked during scheduling.
+ struct SchedulerData {
+ int unscheduled_count_; // Number of unscheduled uses of this node.
+ int minimum_rpo_; // Minimum legal RPO placement.
+ bool is_connected_control_; // {true} if control-connected to the end node.
+ bool is_floating_control_; // {true} if control, but not control-connected
+ // to the end node.
+ Placement placement_ : 3; // Whether the node is fixed, schedulable,
+ // or not yet known.
+ };
+
+ Zone* zone_;
Graph* graph_;
Schedule* schedule_;
- NodeVector branches_;
- NodeVector calls_;
- NodeVector deopts_;
- NodeVector returns_;
- NodeVector loops_and_merges_;
- BasicBlockVector node_block_placement_;
- IntVector unscheduled_uses_;
NodeVectorVector scheduled_nodes_;
NodeVector schedule_root_nodes_;
- IntVector schedule_early_rpo_index_;
+ ZoneVector<SchedulerData> node_data_;
+ bool has_floating_control_;
Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+ SchedulerData DefaultSchedulerData();
+
+ SchedulerData* GetData(Node* node) {
+ DCHECK(node->id() < static_cast<int>(node_data_.size()));
+ return &node_data_[node->id()];
+ }
+
+ void BuildCFG();
+
+ Placement GetPlacement(Node* node);
+
int GetRPONumber(BasicBlock* block) {
DCHECK(block->rpo_number_ >= 0 &&
block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
@@ -51,23 +72,11 @@ class Scheduler {
return block->rpo_number_;
}
- void PrepareAuxiliaryNodeData();
- void PrepareAuxiliaryBlockData();
-
- friend class CreateBlockVisitor;
- void CreateBlocks();
-
- void WireBlocks();
-
- void AddPredecessorsForLoopsAndMerges();
- void AddSuccessorsForBranches();
- void AddSuccessorsForReturns();
- void AddSuccessorsForCalls();
- void AddSuccessorsForDeopts();
-
void GenerateImmediateDominatorTree();
BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+ friend class CFGBuilder;
+
friend class ScheduleEarlyNodeVisitor;
void ScheduleEarly();
@@ -76,6 +85,10 @@ class Scheduler {
friend class ScheduleLateNodeVisitor;
void ScheduleLate();
+
+ bool ConnectFloatingControl();
+
+ void ConnectFloatingControlSubgraph(BasicBlock* block, Node* node);
};
}
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index e32a51e136..7ab20f5d24 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -4,9 +4,8 @@
#include "src/compiler/simplified-lowering.h"
-#include <deque>
-#include <queue>
-
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/node-properties-inl.h"
@@ -54,10 +53,10 @@ class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
struct NodeInfo {
- RepTypeUnion use : 14; // Union of all usages for the node.
+ MachineTypeUnion use : 15; // Union of all usages for the node.
bool queued : 1; // Bookkeeping for the traversal.
bool visited : 1; // Bookkeeping for the traversal.
- RepTypeUnion output : 14; // Output type of the node.
+ MachineTypeUnion output : 15; // Output type of the node.
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
@@ -65,13 +64,12 @@ class RepresentationSelector {
: jsgraph_(jsgraph),
count_(jsgraph->graph()->NodeCount()),
info_(zone->NewArray<NodeInfo>(count_)),
- nodes_(NodeVector::allocator_type(zone)),
- replacements_(NodeVector::allocator_type(zone)),
+ nodes_(zone),
+ replacements_(zone),
contains_js_nodes_(false),
phase_(PROPAGATE),
changer_(changer),
- queue_(std::deque<Node*, NodePtrZoneAllocator>(
- NodePtrZoneAllocator(zone))) {
+ queue_(zone) {
memset(info_, 0, sizeof(NodeInfo) * count_);
}
@@ -115,7 +113,7 @@ class RepresentationSelector {
// Enqueue {node} if the {use} contains new information for that node.
// Add {node} to {nodes_} if this is the first time it's been visited.
- void Enqueue(Node* node, RepTypeUnion use = 0) {
+ void Enqueue(Node* node, MachineTypeUnion use = 0) {
if (phase_ != PROPAGATE) return;
NodeInfo* info = GetInfo(node);
if (!info->visited) {
@@ -147,15 +145,16 @@ class RepresentationSelector {
bool lower() { return phase_ == LOWER; }
- void Enqueue(Node* node, RepType use) {
- Enqueue(node, static_cast<RepTypeUnion>(use));
+ void Enqueue(Node* node, MachineType use) {
+ Enqueue(node, static_cast<MachineTypeUnion>(use));
}
- void SetOutput(Node* node, RepTypeUnion output) {
+ void SetOutput(Node* node, MachineTypeUnion output) {
// Every node should have at most one output representation. Note that
// phis can have 0, if they have not been used in a representation-inducing
// instruction.
- DCHECK((output & rMask) == 0 || IsPowerOf2(output & rMask));
+ DCHECK((output & kRepMask) == 0 ||
+ base::bits::IsPowerOfTwo32(output & kRepMask));
GetInfo(node)->output = output;
}
@@ -165,16 +164,16 @@ class RepresentationSelector {
NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
}
- void ProcessInput(Node* node, int index, RepTypeUnion use) {
+ void ProcessInput(Node* node, int index, MachineTypeUnion use) {
Node* input = node->InputAt(index);
if (phase_ == PROPAGATE) {
// In the propagate phase, propagate the usage information backward.
Enqueue(input, use);
} else {
// In the change phase, insert a change before the use if necessary.
- if ((use & rMask) == 0) return; // No input requirement on the use.
- RepTypeUnion output = GetInfo(input)->output;
- if ((output & rMask & use) == 0) {
+ if ((use & kRepMask) == 0) return; // No input requirement on the use.
+ MachineTypeUnion output = GetInfo(input)->output;
+ if ((output & kRepMask & use) == 0) {
// Output representation doesn't match usage.
TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(),
node->op()->mnemonic(), index, input->id(),
@@ -190,25 +189,31 @@ class RepresentationSelector {
}
}
- static const RepTypeUnion kFloat64 = rFloat64 | tNumber;
- static const RepTypeUnion kInt32 = rWord32 | tInt32;
- static const RepTypeUnion kUint32 = rWord32 | tUint32;
- static const RepTypeUnion kInt64 = rWord64 | tInt64;
- static const RepTypeUnion kUint64 = rWord64 | tUint64;
- static const RepTypeUnion kAnyTagged = rTagged | tAny;
+ void ProcessRemainingInputs(Node* node, int index) {
+ DCHECK_GE(index, NodeProperties::PastValueIndex(node));
+ DCHECK_GE(index, NodeProperties::PastContextIndex(node));
+ for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
+ i < NodeProperties::PastEffectIndex(node); ++i) {
+ Enqueue(node->InputAt(i)); // Effect inputs: just visit
+ }
+ for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
+ i < NodeProperties::PastControlIndex(node); ++i) {
+ Enqueue(node->InputAt(i)); // Control inputs: just visit
+ }
+ }
// The default, most general visitation case. For {node}, process all value,
// context, effect, and control inputs, assuming that value inputs should have
- // {rTagged} representation and can observe all output values {tAny}.
+ // {kRepTagged} representation and can observe all output values {kTypeAny}.
void VisitInputs(Node* node) {
InputIter i = node->inputs().begin();
for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
++i, j--) {
- ProcessInput(node, i.index(), kAnyTagged); // Value inputs
+ ProcessInput(node, i.index(), kMachAnyTagged); // Value inputs
}
for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
++i, j--) {
- ProcessInput(node, i.index(), kAnyTagged); // Context inputs
+ ProcessInput(node, i.index(), kMachAnyTagged); // Context inputs
}
for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
++i, j--) {
@@ -218,11 +223,12 @@ class RepresentationSelector {
++i, j--) {
Enqueue(*i); // Control inputs: just visit
}
- SetOutput(node, kAnyTagged);
+ SetOutput(node, kMachAnyTagged);
}
// Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
+ void VisitBinop(Node* node, MachineTypeUnion input_use,
+ MachineTypeUnion output) {
DCHECK_EQ(2, node->InputCount());
ProcessInput(node, 0, input_use);
ProcessInput(node, 1, input_use);
@@ -230,93 +236,125 @@ class RepresentationSelector {
}
// Helper for unops of the I -> O variety.
- void VisitUnop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
+ void VisitUnop(Node* node, MachineTypeUnion input_use,
+ MachineTypeUnion output) {
DCHECK_EQ(1, node->InputCount());
ProcessInput(node, 0, input_use);
SetOutput(node, output);
}
// Helper for leaf nodes.
- void VisitLeaf(Node* node, RepTypeUnion output) {
+ void VisitLeaf(Node* node, MachineTypeUnion output) {
DCHECK_EQ(0, node->InputCount());
SetOutput(node, output);
}
// Helpers for specific types of binops.
- void VisitFloat64Binop(Node* node) { VisitBinop(node, kFloat64, kFloat64); }
- void VisitInt32Binop(Node* node) { VisitBinop(node, kInt32, kInt32); }
- void VisitUint32Binop(Node* node) { VisitBinop(node, kUint32, kUint32); }
- void VisitInt64Binop(Node* node) { VisitBinop(node, kInt64, kInt64); }
- void VisitUint64Binop(Node* node) { VisitBinop(node, kUint64, kUint64); }
- void VisitFloat64Cmp(Node* node) { VisitBinop(node, kFloat64, rBit); }
- void VisitInt32Cmp(Node* node) { VisitBinop(node, kInt32, rBit); }
- void VisitUint32Cmp(Node* node) { VisitBinop(node, kUint32, rBit); }
- void VisitInt64Cmp(Node* node) { VisitBinop(node, kInt64, rBit); }
- void VisitUint64Cmp(Node* node) { VisitBinop(node, kUint64, rBit); }
+ void VisitFloat64Binop(Node* node) {
+ VisitBinop(node, kMachFloat64, kMachFloat64);
+ }
+ void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
+ void VisitUint32Binop(Node* node) {
+ VisitBinop(node, kMachUint32, kMachUint32);
+ }
+ void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
+ void VisitUint64Binop(Node* node) {
+ VisitBinop(node, kMachUint64, kMachUint64);
+ }
+ void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
+ void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
+ void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
+ void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
+ void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
// Helper for handling phis.
- void VisitPhi(Node* node, RepTypeUnion use) {
+ void VisitPhi(Node* node, MachineTypeUnion use,
+ SimplifiedLowering* lowering) {
// First, propagate the usage information to inputs of the phi.
- int values = OperatorProperties::GetValueInputCount(node->op());
- Node::Inputs inputs = node->inputs();
- for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
- ++iter, --values) {
+ if (!lower()) {
+ int values = OperatorProperties::GetValueInputCount(node->op());
// Propagate {use} of the phi to value inputs, and 0 to control.
- // TODO(titzer): it'd be nice to have distinguished edge kinds here.
- ProcessInput(node, iter.index(), values > 0 ? use : 0);
+ Node::Inputs inputs = node->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter, --values) {
+ // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+ ProcessInput(node, iter.index(), values > 0 ? use : 0);
+ }
}
// Phis adapt to whatever output representation their uses demand,
// pushing representation changes to their inputs.
- RepTypeUnion use_rep = GetUseInfo(node) & rMask;
- RepTypeUnion use_type = GetUseInfo(node) & tMask;
- RepTypeUnion rep = 0;
- if (use_rep & rTagged) {
- rep = rTagged; // Tagged overrides everything.
- } else if (use_rep & rFloat64) {
- rep = rFloat64;
- } else if (use_rep & rWord64) {
- rep = rWord64;
- } else if (use_rep & rWord32) {
- rep = rWord32;
- } else if (use_rep & rBit) {
- rep = rBit;
+ MachineTypeUnion use_rep = GetUseInfo(node) & kRepMask;
+ MachineTypeUnion use_type = GetUseInfo(node) & kTypeMask;
+ MachineTypeUnion rep = 0;
+ if (use_rep & kRepTagged) {
+ rep = kRepTagged; // Tagged overrides everything.
+ } else if (use_rep & kRepFloat32) {
+ rep = kRepFloat32;
+ } else if (use_rep & kRepFloat64) {
+ rep = kRepFloat64;
+ } else if (use_rep & kRepWord64) {
+ rep = kRepWord64;
+ } else if (use_rep & kRepWord32) {
+ rep = kRepWord32;
+ } else if (use_rep & kRepBit) {
+ rep = kRepBit;
} else {
// There was no representation associated with any of the uses.
// TODO(titzer): Select the best rep using phi's type, not the usage type?
- if (use_type & tAny) {
- rep = rTagged;
- } else if (use_type & tNumber) {
- rep = rFloat64;
- } else if (use_type & tInt64 || use_type & tUint64) {
- rep = rWord64;
- } else if (use_type & tInt32 || use_type & tUint32) {
- rep = rWord32;
- } else if (use_type & tBool) {
- rep = rBit;
+ if (use_type & kTypeAny) {
+ rep = kRepTagged;
+ } else if (use_type & kTypeNumber) {
+ rep = kRepFloat64;
+ } else if (use_type & kTypeInt64 || use_type & kTypeUint64) {
+ rep = kRepWord64;
+ } else if (use_type & kTypeInt32 || use_type & kTypeUint32) {
+ rep = kRepWord32;
+ } else if (use_type & kTypeBool) {
+ rep = kRepBit;
} else {
UNREACHABLE(); // should have at least a usage type!
}
}
// Preserve the usage type, but set the representation.
Type* upper = NodeProperties::GetBounds(node).upper;
- SetOutput(node, rep | changer_->TypeFromUpperBound(upper));
+ MachineTypeUnion output_type = rep | changer_->TypeFromUpperBound(upper);
+ SetOutput(node, output_type);
+
+ if (lower()) {
+ int values = OperatorProperties::GetValueInputCount(node->op());
+
+ // Update the phi operator.
+ MachineType type = static_cast<MachineType>(output_type);
+ if (type != OpParameter<MachineType>(node)) {
+ node->set_op(lowering->common()->Phi(type, values));
+ }
+
+ // Convert inputs to the output representation of this phi.
+ Node::Inputs inputs = node->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter, --values) {
+ // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+ ProcessInput(node, iter.index(), values > 0 ? output_type : 0);
+ }
+ }
}
- Operator* Int32Op(Node* node) {
+ const Operator* Int32Op(Node* node) {
return changer_->Int32OperatorFor(node->opcode());
}
- Operator* Uint32Op(Node* node) {
+ const Operator* Uint32Op(Node* node) {
return changer_->Uint32OperatorFor(node->opcode());
}
- Operator* Float64Op(Node* node) {
+ const Operator* Float64Op(Node* node) {
return changer_->Float64OperatorFor(node->opcode());
}
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
- void VisitNode(Node* node, RepTypeUnion use, SimplifiedLowering* lowering) {
+ void VisitNode(Node* node, MachineTypeUnion use,
+ SimplifiedLowering* lowering) {
switch (node->opcode()) {
//------------------------------------------------------------------
// Common operators.
@@ -328,21 +366,21 @@ class RepresentationSelector {
// TODO(titzer): use representation from linkage.
Type* upper = NodeProperties::GetBounds(node).upper;
ProcessInput(node, 0, 0);
- SetOutput(node, rTagged | changer_->TypeFromUpperBound(upper));
+ SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
return;
}
case IrOpcode::kInt32Constant:
- return VisitLeaf(node, rWord32);
+ return VisitLeaf(node, kRepWord32);
case IrOpcode::kInt64Constant:
- return VisitLeaf(node, rWord64);
+ return VisitLeaf(node, kRepWord64);
case IrOpcode::kFloat64Constant:
- return VisitLeaf(node, rFloat64);
+ return VisitLeaf(node, kRepFloat64);
case IrOpcode::kExternalConstant:
- return VisitLeaf(node, rPtr);
+ return VisitLeaf(node, kMachPtr);
case IrOpcode::kNumberConstant:
- return VisitLeaf(node, rTagged);
+ return VisitLeaf(node, kRepTagged);
case IrOpcode::kHeapConstant:
- return VisitLeaf(node, rTagged);
+ return VisitLeaf(node, kRepTagged);
case IrOpcode::kEnd:
case IrOpcode::kIfTrue:
@@ -353,11 +391,11 @@ class RepresentationSelector {
return VisitInputs(node); // default visit for all node inputs.
case IrOpcode::kBranch:
- ProcessInput(node, 0, rBit);
+ ProcessInput(node, 0, kRepBit);
Enqueue(NodeProperties::GetControlInput(node, 0));
break;
case IrOpcode::kPhi:
- return VisitPhi(node, use);
+ return VisitPhi(node, use, lowering);
//------------------------------------------------------------------
// JavaScript operators.
@@ -372,27 +410,45 @@ class RepresentationSelector {
#undef DEFINE_JS_CASE
contains_js_nodes_ = true;
VisitInputs(node);
- return SetOutput(node, rTagged);
+ return SetOutput(node, kRepTagged);
//------------------------------------------------------------------
// Simplified operators.
//------------------------------------------------------------------
case IrOpcode::kBooleanNot: {
if (lower()) {
- RepTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & rBit) {
- // BooleanNot(x: rBit) => WordEqual(x, #0)
+ MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
+ if (input & kRepBit) {
+ // BooleanNot(x: kRepBit) => WordEqual(x, #0)
node->set_op(lowering->machine()->WordEqual());
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
} else {
- // BooleanNot(x: rTagged) => WordEqual(x, #false)
+ // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->set_op(lowering->machine()->WordEqual());
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, tBool);
- SetOutput(node, rBit);
+ ProcessInput(node, 0, kTypeBool);
+ SetOutput(node, kRepBit);
+ }
+ break;
+ }
+ case IrOpcode::kBooleanToNumber: {
+ if (lower()) {
+ MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
+ if (input & kRepBit) {
+ // BooleanToNumber(x: kRepBit) => x
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ // BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
+ node->set_op(lowering->machine()->WordEqual());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
+ }
+ } else {
+ // No input representation requirement; adapt during lowering.
+ ProcessInput(node, 0, kTypeBool);
+ SetOutput(node, kMachInt32);
}
break;
}
@@ -420,12 +476,12 @@ class RepresentationSelector {
// Add and subtract reduce to Int32Add/Sub if the inputs
// are already integers and all uses are truncating.
if (BothInputsAre(node, Type::Signed32()) &&
- (use & (tUint32 | tNumber | tAny)) == 0) {
+ (use & (kTypeUint32 | kTypeNumber | kTypeAny)) == 0) {
// => signed Int32Add/Sub
VisitInt32Binop(node);
if (lower()) node->set_op(Int32Op(node));
} else if (BothInputsAre(node, Type::Unsigned32()) &&
- (use & (tInt32 | tNumber | tAny)) == 0) {
+ (use & (kTypeInt32 | kTypeNumber | kTypeAny)) == 0) {
// => unsigned Int32Add/Sub
VisitUint32Binop(node);
if (lower()) node->set_op(Uint32Op(node));
@@ -445,84 +501,86 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberToInt32: {
- RepTypeUnion use_rep = use & rMask;
+ MachineTypeUnion use_rep = use & kRepMask;
if (lower()) {
- RepTypeUnion in = GetInfo(node->InputAt(0))->output;
- if ((in & tMask) == tInt32 || (in & rMask) == rWord32) {
+ MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
+ if ((in & kTypeMask) == kTypeInt32 || (in & kRepMask) == kRepWord32) {
// If the input has type int32, or is already a word32, just change
// representation if necessary.
- VisitUnop(node, tInt32 | use_rep, tInt32 | use_rep);
+ VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
DeferReplacement(node, node->InputAt(0));
} else {
// Require the input in float64 format and perform truncation.
- // TODO(turbofan): could also avoid the truncation with a tag check.
- VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
- // TODO(titzer): should be a truncation.
- node->set_op(lowering->machine()->ChangeFloat64ToInt32());
+ // TODO(turbofan): avoid a truncation with a smi check.
+ VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
+ node->set_op(lowering->machine()->TruncateFloat64ToInt32());
}
} else {
// Propagate a type to the input, but pass through representation.
- VisitUnop(node, tInt32, tInt32 | use_rep);
+ VisitUnop(node, kTypeInt32, kTypeInt32 | use_rep);
}
break;
}
case IrOpcode::kNumberToUint32: {
- RepTypeUnion use_rep = use & rMask;
+ MachineTypeUnion use_rep = use & kRepMask;
if (lower()) {
- RepTypeUnion in = GetInfo(node->InputAt(0))->output;
- if ((in & tMask) == tUint32 || (in & rMask) == rWord32) {
+ MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
+ if ((in & kTypeMask) == kTypeUint32 ||
+ (in & kRepMask) == kRepWord32) {
// The input has type int32, just change representation.
- VisitUnop(node, tUint32 | use_rep, tUint32 | use_rep);
+ VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
DeferReplacement(node, node->InputAt(0));
} else {
// Require the input in float64 format to perform truncation.
- // TODO(turbofan): could also avoid the truncation with a tag check.
- VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
- // TODO(titzer): should be a truncation.
- node->set_op(lowering->machine()->ChangeFloat64ToUint32());
+ // TODO(turbofan): avoid the truncation with a smi check.
+ VisitUnop(node, kTypeUint32 | kRepFloat64,
+ kTypeUint32 | kRepWord32);
+ node->set_op(lowering->machine()->TruncateFloat64ToInt32());
}
} else {
// Propagate a type to the input, but pass through representation.
- VisitUnop(node, tUint32, tUint32 | use_rep);
+ VisitUnop(node, kTypeUint32, kTypeUint32 | use_rep);
}
break;
}
case IrOpcode::kReferenceEqual: {
- VisitBinop(node, kAnyTagged, rBit);
+ VisitBinop(node, kMachAnyTagged, kRepBit);
if (lower()) node->set_op(lowering->machine()->WordEqual());
break;
}
case IrOpcode::kStringEqual: {
- VisitBinop(node, kAnyTagged, rBit);
- // TODO(titzer): lower StringEqual to stub/runtime call.
+ VisitBinop(node, kMachAnyTagged, kRepBit);
+ if (lower()) lowering->DoStringEqual(node);
break;
}
case IrOpcode::kStringLessThan: {
- VisitBinop(node, kAnyTagged, rBit);
- // TODO(titzer): lower StringLessThan to stub/runtime call.
+ VisitBinop(node, kMachAnyTagged, kRepBit);
+ if (lower()) lowering->DoStringLessThan(node);
break;
}
case IrOpcode::kStringLessThanOrEqual: {
- VisitBinop(node, kAnyTagged, rBit);
- // TODO(titzer): lower StringLessThanOrEqual to stub/runtime call.
+ VisitBinop(node, kMachAnyTagged, kRepBit);
+ if (lower()) lowering->DoStringLessThanOrEqual(node);
break;
}
case IrOpcode::kStringAdd: {
- VisitBinop(node, kAnyTagged, kAnyTagged);
- // TODO(titzer): lower StringAdd to stub/runtime call.
+ VisitBinop(node, kMachAnyTagged, kMachAnyTagged);
+ if (lower()) lowering->DoStringAdd(node);
break;
}
case IrOpcode::kLoadField: {
FieldAccess access = FieldAccessOf(node->op());
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- SetOutput(node, changer_->TypeForField(access));
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, access.machine_type);
if (lower()) lowering->DoLoadField(node);
break;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, changer_->TypeForField(access));
+ ProcessInput(node, 1, access.machine_type);
+ ProcessRemainingInputs(node, 2);
SetOutput(node, 0);
if (lower()) lowering->DoStoreField(node);
break;
@@ -530,16 +588,20 @@ class RepresentationSelector {
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, kInt32); // element index
- SetOutput(node, changer_->TypeForElement(access));
+ ProcessInput(node, 1, kMachInt32); // element index
+ ProcessInput(node, 2, kMachInt32); // length
+ ProcessRemainingInputs(node, 3);
+ SetOutput(node, access.machine_type);
if (lower()) lowering->DoLoadElement(node);
break;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, kInt32); // element index
- ProcessInput(node, 2, changer_->TypeForElement(access));
+ ProcessInput(node, 1, kMachInt32); // element index
+ ProcessInput(node, 2, kMachInt32); // length
+ ProcessInput(node, 3, access.machine_type);
+ ProcessRemainingInputs(node, 4);
SetOutput(node, 0);
if (lower()) lowering->DoStoreElement(node);
break;
@@ -550,26 +612,28 @@ class RepresentationSelector {
//------------------------------------------------------------------
case IrOpcode::kLoad: {
// TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- RepType tBase = rTagged;
- MachineType rep = OpParameter<MachineType>(node);
+ MachineType tBase = kRepTagged;
+ LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kInt32); // index
- SetOutput(node, changer_->TypeForMachineType(rep));
+ ProcessInput(node, 1, kMachInt32); // index
+ ProcessRemainingInputs(node, 2);
+ SetOutput(node, rep);
break;
}
case IrOpcode::kStore: {
// TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- RepType tBase = rTagged;
+ MachineType tBase = kRepTagged;
StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kInt32); // index
- ProcessInput(node, 2, changer_->TypeForMachineType(rep.rep));
+ ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 2, rep.machine_type());
+ ProcessRemainingInputs(node, 3);
SetOutput(node, 0);
break;
}
case IrOpcode::kWord32Shr:
// We output unsigned int32 for shift right because JavaScript.
- return VisitBinop(node, rWord32, rWord32 | tUint32);
+ return VisitBinop(node, kRepWord32, kRepWord32 | kTypeUint32);
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -578,9 +642,9 @@ class RepresentationSelector {
// We use signed int32 as the output type for these word32 operations,
// though the machine bits are the same for either signed or unsigned,
// because JavaScript considers the result from these operations signed.
- return VisitBinop(node, rWord32, rWord32 | tInt32);
+ return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
case IrOpcode::kWord32Equal:
- return VisitBinop(node, rWord32, rBit);
+ return VisitBinop(node, kRepWord32, kRepBit);
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
@@ -619,23 +683,39 @@ class RepresentationSelector {
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
- return VisitBinop(node, rWord64, rWord64);
+ return VisitBinop(node, kRepWord64, kRepWord64);
case IrOpcode::kWord64Equal:
- return VisitBinop(node, rWord64, rBit);
-
- case IrOpcode::kConvertInt32ToInt64:
- return VisitUnop(node, tInt32 | rWord32, tInt32 | rWord64);
- case IrOpcode::kConvertInt64ToInt32:
- return VisitUnop(node, tInt64 | rWord64, tInt32 | rWord32);
-
+ return VisitBinop(node, kRepWord64, kRepBit);
+
+ case IrOpcode::kChangeInt32ToInt64:
+ return VisitUnop(node, kTypeInt32 | kRepWord32,
+ kTypeInt32 | kRepWord64);
+ case IrOpcode::kChangeUint32ToUint64:
+ return VisitUnop(node, kTypeUint32 | kRepWord32,
+ kTypeUint32 | kRepWord64);
+ case IrOpcode::kTruncateFloat64ToFloat32:
+ return VisitUnop(node, kTypeNumber | kRepFloat64,
+ kTypeNumber | kRepFloat32);
+ case IrOpcode::kTruncateInt64ToInt32:
+ // TODO(titzer): Is kTypeInt32 correct here?
+ return VisitUnop(node, kTypeInt32 | kRepWord64,
+ kTypeInt32 | kRepWord32);
+
+ case IrOpcode::kChangeFloat32ToFloat64:
+ return VisitUnop(node, kTypeNumber | kRepFloat32,
+ kTypeNumber | kRepFloat64);
case IrOpcode::kChangeInt32ToFloat64:
- return VisitUnop(node, tInt32 | rWord32, tInt32 | rFloat64);
+ return VisitUnop(node, kTypeInt32 | kRepWord32,
+ kTypeInt32 | kRepFloat64);
case IrOpcode::kChangeUint32ToFloat64:
- return VisitUnop(node, tUint32 | rWord32, tUint32 | rFloat64);
+ return VisitUnop(node, kTypeUint32 | kRepWord32,
+ kTypeUint32 | kRepFloat64);
case IrOpcode::kChangeFloat64ToInt32:
- return VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
+ return VisitUnop(node, kTypeInt32 | kRepFloat64,
+ kTypeInt32 | kRepWord32);
case IrOpcode::kChangeFloat64ToUint32:
- return VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
+ return VisitUnop(node, kTypeUint32 | kRepFloat64,
+ kTypeUint32 | kRepWord32);
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
@@ -643,6 +723,8 @@ class RepresentationSelector {
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
return VisitFloat64Binop(node);
+ case IrOpcode::kFloat64Sqrt:
+ return VisitUnop(node, kMachFloat64, kMachFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
@@ -674,11 +756,10 @@ class RepresentationSelector {
TRACE(("\n"));
}
- void PrintInfo(RepTypeUnion info) {
+ void PrintInfo(MachineTypeUnion info) {
if (FLAG_trace_representation) {
- char buf[REP_TYPE_STRLEN];
- RenderRepTypeUnion(buf, info);
- TRACE(("%s", buf));
+ OFStream os(stdout);
+ os << static_cast<MachineType>(info);
}
}
@@ -691,8 +772,7 @@ class RepresentationSelector {
bool contains_js_nodes_; // {true} if a JS operator was seen
Phase phase_; // current phase of algorithm
RepresentationChanger* changer_; // for inserting representation changes
-
- std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue_;
+ ZoneQueue<Node*> queue_; // queue for traversing the graph
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() >= 0);
@@ -700,7 +780,7 @@ class RepresentationSelector {
return &info_[node->id()];
}
- RepTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
+ MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
};
@@ -714,12 +794,10 @@ Node* SimplifiedLowering::IsTagged(Node* node) {
void SimplifiedLowering::LowerAllNodes() {
SimplifiedOperatorBuilder simplified(graph()->zone());
- RepresentationChanger changer(jsgraph(), &simplified, machine(),
+ RepresentationChanger changer(jsgraph(), &simplified,
graph()->zone()->isolate());
RepresentationSelector selector(jsgraph(), zone(), &changer);
selector.Run(this);
-
- LoweringBuilder::LowerAllNodes();
}
@@ -742,162 +820,12 @@ Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
}
-static void UpdateControlSuccessors(Node* before, Node* node) {
- DCHECK(IrOpcode::IsControlOpcode(before->opcode()));
- UseIter iter = before->uses().begin();
- while (iter != before->uses().end()) {
- if (IrOpcode::IsControlOpcode((*iter)->opcode()) &&
- NodeProperties::IsControlEdge(iter.edge())) {
- iter = iter.UpdateToAndIncrement(node);
- continue;
- }
- ++iter;
- }
-}
-
-
-void SimplifiedLowering::DoChangeTaggedToUI32(Node* node, Node* effect,
- Node* control, bool is_signed) {
- // if (IsTagged(val))
- // ConvertFloat64To(Int32|Uint32)(Load[kMachineFloat64](input, #value_offset))
- // else Untag(val)
- Node* val = node->InputAt(0);
- Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val), control);
-
- // true branch.
- Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
- Node* loaded = graph()->NewNode(
- machine()->Load(kMachineFloat64), val,
- OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
- Operator* op = is_signed ? machine()->ChangeFloat64ToInt32()
- : machine()->ChangeFloat64ToUint32();
- Node* converted = graph()->NewNode(op, loaded);
-
- // false branch.
- Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
- Node* untagged = Untag(val);
-
- // merge.
- Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
- Node* phi = graph()->NewNode(common()->Phi(2), converted, untagged, merge);
- UpdateControlSuccessors(control, merge);
- branch->ReplaceInput(1, control);
- node->ReplaceUses(phi);
-}
-
-
-void SimplifiedLowering::DoChangeTaggedToFloat64(Node* node, Node* effect,
- Node* control) {
- // if (IsTagged(input)) Load[kMachineFloat64](input, #value_offset)
- // else ConvertFloat64(Untag(input))
- Node* val = node->InputAt(0);
- Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val), control);
-
- // true branch.
- Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
- Node* loaded = graph()->NewNode(
- machine()->Load(kMachineFloat64), val,
- OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
-
- // false branch.
- Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
- Node* untagged = Untag(val);
- Node* converted =
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), untagged);
-
- // merge.
- Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
- Node* phi = graph()->NewNode(common()->Phi(2), loaded, converted, merge);
- UpdateControlSuccessors(control, merge);
- branch->ReplaceInput(1, control);
- node->ReplaceUses(phi);
-}
-
-
-void SimplifiedLowering::DoChangeUI32ToTagged(Node* node, Node* effect,
- Node* control, bool is_signed) {
- Node* val = node->InputAt(0);
- Node* is_smi = NULL;
- if (is_signed) {
- if (SmiValuesAre32Bits()) {
- // All int32s fit in this case.
- DCHECK(kPointerSize == 8);
- return node->ReplaceUses(SmiTag(val));
- } else {
- // TODO(turbofan): use an Int32AddWithOverflow to tag and check here.
- Node* lt = graph()->NewNode(machine()->Int32LessThanOrEqual(), val,
- jsgraph()->Int32Constant(Smi::kMaxValue));
- Node* gt =
- graph()->NewNode(machine()->Int32LessThanOrEqual(),
- jsgraph()->Int32Constant(Smi::kMinValue), val);
- is_smi = graph()->NewNode(machine()->Word32And(), lt, gt);
- }
- } else {
- // Check if Uint32 value is in the smi range.
- is_smi = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
- jsgraph()->Int32Constant(Smi::kMaxValue));
- }
-
- // TODO(turbofan): fold smi test branch eagerly.
- // if (IsSmi(input)) SmiTag(input);
- // else InlineAllocAndInitHeapNumber(ConvertToFloat64(input)))
- Node* branch = graph()->NewNode(common()->Branch(), is_smi, control);
-
- // true branch.
- Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
- Node* smi_tagged = SmiTag(val);
-
- // false branch.
- Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
- Node* heap_num = jsgraph()->Constant(0.0); // TODO(titzer): alloc and init
-
- // merge.
- Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
- Node* phi = graph()->NewNode(common()->Phi(2), smi_tagged, heap_num, merge);
- UpdateControlSuccessors(control, merge);
- branch->ReplaceInput(1, control);
- node->ReplaceUses(phi);
-}
-
-
-void SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect,
- Node* control) {
- return; // TODO(titzer): need to call runtime to allocate in one branch
-}
-
-
-void SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
- Node* control) {
- Node* cmp = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
- jsgraph()->TrueConstant());
- node->ReplaceUses(cmp);
-}
-
-
-void SimplifiedLowering::DoChangeBitToBool(Node* node, Node* effect,
- Node* control) {
- Node* val = node->InputAt(0);
- Node* branch = graph()->NewNode(common()->Branch(), val, control);
-
- // true branch.
- Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
- // false branch.
- Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
- // merge.
- Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
- Node* phi = graph()->NewNode(common()->Phi(2), jsgraph()->TrueConstant(),
- jsgraph()->FalseConstant(), merge);
- UpdateControlSuccessors(control, merge);
- branch->ReplaceInput(1, control);
- node->ReplaceUses(phi);
-}
-
-
static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
MachineType representation,
Type* type) {
// TODO(turbofan): skip write barriers for Smis, etc.
- if (base_is_tagged == kTaggedBase && representation == kMachineTagged) {
+ if (base_is_tagged == kTaggedBase &&
+ RepresentationOf(representation) == kRepTagged) {
// Write barriers are only for writes into heap objects (i.e. tagged base).
return kFullWriteBarrier;
}
@@ -907,7 +835,7 @@ static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
void SimplifiedLowering::DoLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
- node->set_op(machine_.Load(access.representation));
+ node->set_op(machine()->Load(access.machine_type));
Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
node->InsertInput(zone(), 1, offset);
}
@@ -916,8 +844,9 @@ void SimplifiedLowering::DoLoadField(Node* node) {
void SimplifiedLowering::DoStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.representation, access.type);
- node->set_op(machine_.Store(access.representation, kind));
+ access.base_is_tagged, access.machine_type, access.type);
+ node->set_op(
+ machine()->Store(StoreRepresentation(access.machine_type, kind)));
Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
node->InsertInput(zone(), 1, offset);
}
@@ -925,28 +854,7 @@ void SimplifiedLowering::DoStoreField(Node* node) {
Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
Node* index) {
- int element_size = 0;
- switch (access.representation) {
- case kMachineTagged:
- element_size = kPointerSize;
- break;
- case kMachineWord8:
- element_size = 1;
- break;
- case kMachineWord16:
- element_size = 2;
- break;
- case kMachineWord32:
- element_size = 4;
- break;
- case kMachineWord64:
- case kMachineFloat64:
- element_size = 8;
- break;
- case kMachineLast:
- UNREACHABLE();
- break;
- }
+ int element_size = ElementSizeOf(access.machine_type);
if (element_size != 1) {
index = graph()->NewNode(machine()->Int32Mul(),
jsgraph()->Int32Constant(element_size), index);
@@ -960,55 +868,77 @@ Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
void SimplifiedLowering::DoLoadElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
- node->set_op(machine_.Load(access.representation));
+ node->set_op(machine()->Load(access.machine_type));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ node->RemoveInput(2);
}
void SimplifiedLowering::DoStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.representation, access.type);
- node->set_op(machine_.Store(access.representation, kind));
+ access.base_is_tagged, access.machine_type, access.type);
+ node->set_op(
+ machine()->Store(StoreRepresentation(access.machine_type, kind)));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ node->RemoveInput(2);
}
-void SimplifiedLowering::Lower(Node* node) {}
-
-
-void SimplifiedLowering::LowerChange(Node* node, Node* effect, Node* control) {
- switch (node->opcode()) {
- case IrOpcode::kChangeTaggedToInt32:
- DoChangeTaggedToUI32(node, effect, control, true);
- break;
- case IrOpcode::kChangeTaggedToUint32:
- DoChangeTaggedToUI32(node, effect, control, false);
- break;
- case IrOpcode::kChangeTaggedToFloat64:
- DoChangeTaggedToFloat64(node, effect, control);
- break;
- case IrOpcode::kChangeInt32ToTagged:
- DoChangeUI32ToTagged(node, effect, control, true);
- break;
- case IrOpcode::kChangeUint32ToTagged:
- DoChangeUI32ToTagged(node, effect, control, false);
- break;
- case IrOpcode::kChangeFloat64ToTagged:
- DoChangeFloat64ToTagged(node, effect, control);
- break;
- case IrOpcode::kChangeBoolToBit:
- DoChangeBoolToBit(node, effect, control);
- break;
- case IrOpcode::kChangeBitToBool:
- DoChangeBitToBool(node, effect, control);
- break;
- default:
- UNREACHABLE();
- break;
- }
+void SimplifiedLowering::DoStringAdd(Node* node) {
+ Callable callable = CodeFactory::StringAdd(
+ zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc =
+ Linkage::GetStubCallDescriptor(callable.descriptor(), 0, flags, zone());
+ node->set_op(common()->Call(desc));
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(callable.code()));
+ node->AppendInput(zone(), jsgraph()->UndefinedConstant());
+ node->AppendInput(zone(), graph()->start());
+ node->AppendInput(zone(), graph()->start());
}
+
+Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
+ CEntryStub stub(zone()->isolate(), 1);
+ Runtime::FunctionId f =
+ requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
+ ExternalReference ref(f, zone()->isolate());
+ Operator::Properties props = node->op()->properties();
+ // TODO(mstarzinger): We should call StringCompareStub here instead, once an
+ // interface descriptor is available for it.
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(f, 2, props, zone());
+ return graph()->NewNode(common()->Call(desc),
+ jsgraph()->HeapConstant(stub.GetCode()),
+ NodeProperties::GetValueInput(node, 0),
+ NodeProperties::GetValueInput(node, 1),
+ jsgraph()->ExternalConstant(ref),
+ jsgraph()->Int32Constant(2),
+ jsgraph()->UndefinedConstant());
+}
+
+
+void SimplifiedLowering::DoStringEqual(Node* node) {
+ node->set_op(machine()->WordEqual());
+ node->ReplaceInput(0, StringComparison(node, false));
+ node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+void SimplifiedLowering::DoStringLessThan(Node* node) {
+ node->set_op(machine()->IntLessThan());
+ node->ReplaceInput(0, StringComparison(node, true));
+ node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
+ node->set_op(machine()->IntLessThanOrEqual());
+ node->ReplaceInput(0, StringComparison(node, true));
+ node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index c85515d944..2ba7e3bd88 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
#define V8_COMPILER_SIMPLIFIED_LOWERING_H_
-#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/lowering-builder.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
@@ -16,44 +14,32 @@ namespace v8 {
namespace internal {
namespace compiler {
-class SimplifiedLowering : public LoweringBuilder {
+class SimplifiedLowering {
public:
- explicit SimplifiedLowering(JSGraph* jsgraph,
- SourcePositionTable* source_positions)
- : LoweringBuilder(jsgraph->graph(), source_positions),
- jsgraph_(jsgraph),
- machine_(jsgraph->zone()) {}
+ explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
virtual ~SimplifiedLowering() {}
void LowerAllNodes();
- virtual void Lower(Node* node);
- void LowerChange(Node* node, Node* effect, Node* control);
-
// TODO(titzer): These are exposed for direct testing. Use a friend class.
void DoLoadField(Node* node);
void DoStoreField(Node* node);
void DoLoadElement(Node* node);
void DoStoreElement(Node* node);
+ void DoStringAdd(Node* node);
+ void DoStringEqual(Node* node);
+ void DoStringLessThan(Node* node);
+ void DoStringLessThanOrEqual(Node* node);
private:
JSGraph* jsgraph_;
- MachineOperatorBuilder machine_;
Node* SmiTag(Node* node);
Node* IsTagged(Node* node);
Node* Untag(Node* node);
Node* OffsetMinusTagConstant(int32_t offset);
Node* ComputeIndex(const ElementAccess& access, Node* index);
-
- void DoChangeTaggedToUI32(Node* node, Node* effect, Node* control,
- bool is_signed);
- void DoChangeUI32ToTagged(Node* node, Node* effect, Node* control,
- bool is_signed);
- void DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
- void DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
- void DoChangeBoolToBit(Node* node, Node* effect, Node* control);
- void DoChangeBitToBool(Node* node, Node* effect, Node* control);
+ Node* StringComparison(Node* node, bool requires_ordering);
friend class RepresentationSelector;
@@ -61,7 +47,7 @@ class SimplifiedLowering : public LoweringBuilder {
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph()->graph(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
- MachineOperatorBuilder* machine() { return &machine_; }
+ MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-node-factory.h b/deps/v8/src/compiler/simplified-node-factory.h
deleted file mode 100644
index 8660ce6700..0000000000
--- a/deps/v8/src/compiler/simplified-node-factory.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
-#define V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
-
-#include "src/compiler/node.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define SIMPLIFIED() static_cast<NodeFactory*>(this)->simplified()
-#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
-#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
-#define NEW_NODE_3(op, a, b, c) \
- static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)
-
-template <typename NodeFactory>
-class SimplifiedNodeFactory {
- public:
- Node* BooleanNot(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->BooleanNot(), a);
- }
-
- Node* NumberEqual(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberEqual(), a, b);
- }
- Node* NumberNotEqual(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberNotEqual(), a, b);
- }
- Node* NumberLessThan(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberLessThan(), a, b);
- }
- Node* NumberLessThanOrEqual(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberLessThanOrEqual(), a, b);
- }
- Node* NumberAdd(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberAdd(), a, b);
- }
- Node* NumberSubtract(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberSubtract(), a, b);
- }
- Node* NumberMultiply(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberMultiply(), a, b);
- }
- Node* NumberDivide(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberDivide(), a, b);
- }
- Node* NumberModulus(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->NumberModulus(), a, b);
- }
- Node* NumberToInt32(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->NumberToInt32(), a);
- }
- Node* NumberToUint32(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->NumberToUint32(), a);
- }
-
- Node* ReferenceEqual(Type* type, Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->ReferenceEqual(), a, b);
- }
-
- Node* StringEqual(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->StringEqual(), a, b);
- }
- Node* StringLessThan(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->StringLessThan(), a, b);
- }
- Node* StringLessThanOrEqual(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->StringLessThanOrEqual(), a, b);
- }
- Node* StringAdd(Node* a, Node* b) {
- return NEW_NODE_2(SIMPLIFIED()->StringAdd(), a, b);
- }
-
- Node* ChangeTaggedToInt32(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToInt32(), a);
- }
- Node* ChangeTaggedToUint32(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToUint32(), a);
- }
- Node* ChangeTaggedToFloat64(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToFloat64(), a);
- }
- Node* ChangeInt32ToTagged(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeInt32ToTagged(), a);
- }
- Node* ChangeUint32ToTagged(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeUint32ToTagged(), a);
- }
- Node* ChangeFloat64ToTagged(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeFloat64ToTagged(), a);
- }
- Node* ChangeBoolToBit(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeBoolToBit(), a);
- }
- Node* ChangeBitToBool(Node* a) {
- return NEW_NODE_1(SIMPLIFIED()->ChangeBitToBool(), a);
- }
-
- Node* LoadField(const FieldAccess& access, Node* object) {
- return NEW_NODE_1(SIMPLIFIED()->LoadField(access), object);
- }
- Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
- return NEW_NODE_2(SIMPLIFIED()->StoreField(access), object, value);
- }
- Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
- return NEW_NODE_2(SIMPLIFIED()->LoadElement(access), object, index);
- }
- Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
- Node* value) {
- return NEW_NODE_3(SIMPLIFIED()->StoreElement(access), object, index, value);
- }
-};
-
-#undef NEW_NODE_1
-#undef NEW_NODE_2
-#undef NEW_NODE_3
-#undef SIMPLIFIED
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
diff --git a/deps/v8/src/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/src/compiler/simplified-operator-reducer-unittest.cc
new file mode 100644
index 0000000000..739264ed43
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-operator-reducer-unittest.cc
@@ -0,0 +1,483 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "src/conversions.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedOperatorReducerTest : public GraphTest {
+ public:
+ explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
+ : GraphTest(num_parameters), simplified_(zone()) {}
+ virtual ~SimplifiedOperatorReducerTest() {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ Typer typer(zone());
+ MachineOperatorBuilder machine;
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
+ SimplifiedOperatorReducer reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+};
+
+
+template <typename T>
+class SimplifiedOperatorReducerTestWithParam
+ : public SimplifiedOperatorReducerTest,
+ public ::testing::WithParamInterface<T> {
+ public:
+ explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
+ : SimplifiedOperatorReducerTest(num_parameters) {}
+ virtual ~SimplifiedOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const double kFloat64Values[] = {
+ -V8_INFINITY, -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
+ -8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
+ -1.67913e+188, -1.6257e+184, -2.60043e+170, -2.52941e+168, -3.06033e+116,
+ -4.56201e+52, -3.56788e+50, -9.9066e+38, -3.07261e+31, -2.1271e+09,
+ -1.91489e+09, -1.73053e+09, -9.30675e+08, -26030, -20453,
+ -15790, -11699, -111, -97, -78,
+ -63, -58, -1.53858e-06, -2.98914e-12, -1.14741e-39,
+ -8.20347e-57, -1.48932e-59, -3.17692e-66, -8.93103e-81, -3.91337e-83,
+ -6.0489e-92, -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
+ -1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
+ -1.11612e-293, -0.0, 0.0, 2.22507e-308, 1.06526e-307,
+ 4.16643e-227, 6.76624e-223, 2.0432e-197, 3.16254e-184, 1.37315e-173,
+ 2.88603e-172, 1.54155e-99, 4.42923e-81, 1.40539e-73, 5.4462e-73,
+ 1.24064e-58, 3.11167e-58, 2.75826e-39, 0.143815, 58,
+ 67, 601, 7941, 11644, 13697,
+ 25680, 29882, 1.32165e+08, 1.62439e+08, 4.16837e+08,
+ 9.59097e+08, 1.32491e+09, 1.8728e+09, 1.0672e+17, 2.69606e+46,
+ 1.98285e+79, 1.0098e+82, 7.93064e+88, 3.67444e+121, 9.36506e+123,
+ 7.27954e+162, 3.05316e+168, 1.16171e+175, 1.64771e+189, 1.1622e+202,
+ 2.00748e+239, 2.51778e+244, 3.90282e+306, 1.79769e+308, V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+ -2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
+ -1318814539, -1289388009, -1287537572, -1279026536, -1241605942,
+ -1226046939, -941837148, -779818051, -413830641, -245798087,
+ -184657557, -127145950, -105483328, -32325, -26653,
+ -23858, -23834, -22363, -19858, -19044,
+ -18744, -15528, -5309, -3372, -2093,
+ -104, -98, -97, -93, -84,
+ -80, -78, -76, -72, -58,
+ -57, -56, -55, -45, -40,
+ -34, -32, -25, -24, -5,
+ -2, 0, 3, 10, 24,
+ 34, 42, 46, 47, 48,
+ 52, 56, 64, 65, 71,
+ 76, 79, 81, 82, 97,
+ 102, 103, 104, 106, 107,
+ 109, 116, 122, 3653, 4485,
+ 12405, 16504, 26262, 28704, 29755,
+ 30554, 16476817, 605431957, 832401070, 873617242,
+ 914205764, 1062628108, 1087581664, 1488498068, 1534668023,
+ 1661587028, 1696896187, 1866841746, 2032089723, 2147483647};
+
+
+static const uint32_t kUint32Values[] = {
+ 0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
+ 0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
+ 0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
+ 0x83, 0x88, 0x96, 0x9c, 0xa3, 0xfa,
+ 0x7a7, 0x165d, 0x234d, 0x3acb, 0x43a5, 0x4573,
+ 0x5b4f, 0x5f14, 0x6996, 0x6c6e, 0x7289, 0x7b9a,
+ 0x7bc9, 0x86bb, 0xa839, 0xaa41, 0xb03b, 0xc942,
+ 0xce68, 0xcf4c, 0xd3ad, 0xdea3, 0xe90c, 0xed86,
+ 0xfba5, 0x172dcc6, 0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
+ 0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
+ 0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
+ 0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
+ 0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
+ 0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
+ 0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
+ 0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
+ 0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
+
+
+MATCHER(IsNaN, std::string(negation ? "isn't" : "is") + " NaN") {
+ return std::isnan(arg);
+}
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+ const Operator* (SimplifiedOperatorBuilder::*constructor)();
+ const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+ return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+ {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
+ {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
+ {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
+ {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
+ "ChangeFloat64ToTagged"},
+ {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
+ {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
+ "ChangeTaggedToFloat64"},
+ {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
+ {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
+ {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
+
+} // namespace
+
+
+typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
+ SimplifiedUnaryOperatorTest;
+
+
+TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
+ const UnaryOperator& unop = GetParam();
+ Reduction reduction = Reduce(
+ graph()->NewNode((simplified()->*unop.constructor)(), Parameter(0)));
+ EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
+ SimplifiedUnaryOperatorTest,
+ ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// BooleanNot
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithBooleanNot) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->BooleanNot(),
+ graph()->NewNode(simplified()->BooleanNot(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithFalseConstant) {
+ Reduction reduction0 =
+ Reduce(graph()->NewNode(simplified()->BooleanNot(), FalseConstant()));
+ ASSERT_TRUE(reduction0.Changed());
+ EXPECT_THAT(reduction0.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
+ Reduction reduction1 =
+ Reduce(graph()->NewNode(simplified()->BooleanNot(), TrueConstant()));
+ ASSERT_TRUE(reduction1.Changed());
+ EXPECT_THAT(reduction1.replacement(), IsFalseConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeBitToBool(),
+ graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeBoolToBit(),
+ graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(n));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
+ TRACED_FOREACH(int32_t, n, kInt32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeInt32ToTagged(), Int32Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastI2D(n)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToFloat64
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToFloat64WithChangeInt32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeInt32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToFloat64WithChangeUint32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeUint32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(n));
+ }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ NumberConstant(-base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ NumberConstant(base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToInt32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToInt32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToInt32WithChangeInt32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+ }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+ NumberConstant(-base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+ NumberConstant(base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToUint32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeUint32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
+ }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+ NumberConstant(-base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+ NumberConstant(base::OS::nan_value())));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
+ TRACED_FOREACH(uint32_t, n, kUint32Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
+ Int32Constant(bit_cast<int32_t>(n))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastUI2D(n)));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
new file mode 100644
index 0000000000..f6181ea988
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -0,0 +1,147 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
+
+
+Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kBooleanNot: {
+ HeapObjectMatcher<HeapObject> m(node->InputAt(0));
+ if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
+ return Replace(jsgraph()->TrueConstant());
+ }
+ if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
+ return Replace(jsgraph()->FalseConstant());
+ }
+ if (m.IsBooleanNot()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeBitToBool: {
+ Int32Matcher m(node->InputAt(0));
+ if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
+ if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
+ if (m.IsChangeBoolToBit()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeBoolToBit: {
+ HeapObjectMatcher<HeapObject> m(node->InputAt(0));
+ if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
+ return ReplaceInt32(0);
+ }
+ if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
+ return ReplaceInt32(1);
+ }
+ if (m.IsChangeBitToBool()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeFloat64ToTagged: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceNumber(m.Value());
+ break;
+ }
+ case IrOpcode::kChangeInt32ToTagged: {
+ Int32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceNumber(m.Value());
+ break;
+ }
+ case IrOpcode::kChangeTaggedToFloat64: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(m.Value());
+ if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeInt32ToTagged()) {
+ return Change(node, machine()->ChangeInt32ToFloat64(),
+ m.node()->InputAt(0));
+ }
+ if (m.IsChangeUint32ToTagged()) {
+ return Change(node, machine()->ChangeUint32ToFloat64(),
+ m.node()->InputAt(0));
+ }
+ break;
+ }
+ case IrOpcode::kChangeTaggedToInt32: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.IsChangeFloat64ToTagged()) {
+ return Change(node, machine()->ChangeFloat64ToInt32(),
+ m.node()->InputAt(0));
+ }
+ if (m.IsChangeInt32ToTagged()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeTaggedToUint32: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
+ if (m.IsChangeFloat64ToTagged()) {
+ return Change(node, machine()->ChangeFloat64ToUint32(),
+ m.node()->InputAt(0));
+ }
+ if (m.IsChangeUint32ToTagged()) return Replace(m.node()->InputAt(0));
+ break;
+ }
+ case IrOpcode::kChangeUint32ToTagged: {
+ Uint32Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
+ break;
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
+ Node* a) {
+ node->set_op(op);
+ node->ReplaceInput(0, a);
+ return Changed(node);
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
+ return Replace(jsgraph()->Float64Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceInt32(int32_t value) {
+ return Replace(jsgraph()->Int32Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceNumber(double value) {
+ return Replace(jsgraph()->Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceNumber(int32_t value) {
+ return Replace(jsgraph()->Constant(value));
+}
+
+
+Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+
+Factory* SimplifiedOperatorReducer::factory() const {
+ return jsgraph()->isolate()->factory();
+}
+
+
+MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
new file mode 100644
index 0000000000..32f49adc56
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Heap;
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+class MachineOperatorBuilder;
+
+class SimplifiedOperatorReducer FINAL : public Reducer {
+ public:
+ explicit SimplifiedOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+ virtual ~SimplifiedOperatorReducer();
+
+ virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+ Reduction Change(Node* node, const Operator* op, Node* a);
+ Reduction ReplaceFloat64(double value);
+ Reduction ReplaceInt32(int32_t value);
+ Reduction ReplaceUint32(uint32_t value) {
+ return ReplaceInt32(bit_cast<int32_t>(value));
+ }
+ Reduction ReplaceNumber(double value);
+ Reduction ReplaceNumber(int32_t value);
+
+ Graph* graph() const;
+ Factory* factory() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ MachineOperatorBuilder* machine() const;
+
+ JSGraph* jsgraph_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
diff --git a/deps/v8/src/compiler/simplified-operator-unittest.cc b/deps/v8/src/compiler/simplified-operator-unittest.cc
new file mode 100644
index 0000000000..4014f24e90
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-operator-unittest.cc
@@ -0,0 +1,222 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/compiler/operator-properties-inl.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(bmeurer): Drop once we use std::ostream instead of our OStream.
+inline std::ostream& operator<<(std::ostream& os, const ElementAccess& access) {
+ OStringStream ost;
+ ost << access;
+ return os << ost.c_str();
+}
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+ const Operator* (SimplifiedOperatorBuilder::*constructor)();
+ IrOpcode::Value opcode;
+ Operator::Properties properties;
+ int value_input_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+ return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, properties, input_count) \
+ { \
+ &SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
+ Operator::kPure | properties, input_count \
+ }
+ PURE(BooleanNot, Operator::kNoProperties, 1),
+ PURE(NumberEqual, Operator::kCommutative, 2),
+ PURE(NumberLessThan, Operator::kNoProperties, 2),
+ PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
+ PURE(NumberAdd, Operator::kCommutative, 2),
+ PURE(NumberSubtract, Operator::kNoProperties, 2),
+ PURE(NumberMultiply, Operator::kCommutative, 2),
+ PURE(NumberDivide, Operator::kNoProperties, 2),
+ PURE(NumberModulus, Operator::kNoProperties, 2),
+ PURE(NumberToInt32, Operator::kNoProperties, 1),
+ PURE(NumberToUint32, Operator::kNoProperties, 1),
+ PURE(StringEqual, Operator::kCommutative, 2),
+ PURE(StringLessThan, Operator::kNoProperties, 2),
+ PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
+ PURE(StringAdd, Operator::kNoProperties, 2),
+ PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
+ PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
+ PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
+ PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
+ PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
+ PURE(ChangeBitToBool, Operator::kNoProperties, 1)
+#undef PURE
+};
+
+} // namespace
+
+
+class SimplifiedPureOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<PureOperator> {};
+
+
+TEST_P(SimplifiedPureOperatorTest, InstancesAreGloballyShared) {
+ const PureOperator& pop = GetParam();
+ SimplifiedOperatorBuilder simplified1(zone());
+ SimplifiedOperatorBuilder simplified2(zone());
+ EXPECT_EQ((simplified1.*pop.constructor)(), (simplified2.*pop.constructor)());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, NumberOfInputsAndOutputs) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (simplified.*pop.constructor)();
+
+ EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, OpcodeIsCorrect) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (simplified.*pop.constructor)();
+ EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, Properties) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const PureOperator& pop = GetParam();
+ const Operator* op = (simplified.*pop.constructor)();
+ EXPECT_EQ(pop.properties, op->properties() & pop.properties);
+}
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
+ ::testing::ValuesIn(kPureOperators));
+
+
+// -----------------------------------------------------------------------------
+// Element access operators.
+
+namespace {
+
+const ElementAccess kElementAccesses[] = {
+ {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
+ {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+ kMachInt8},
+ {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+ kMachInt16},
+ {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+ kMachInt32},
+ {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+ kMachUint8},
+ {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+ kMachUint16},
+ {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+ kMachUint32},
+ {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
+ {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
+ {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
+ {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
+ {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
+ {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
+ {kUntaggedBase, 0, Type::Number(), kRepFloat32},
+ {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ kMachInt8},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ kMachUint8},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ kMachInt16},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ kMachUint16},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ kMachInt32},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ kMachUint32},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+ kRepFloat32},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+ kRepFloat64}};
+
+} // namespace
+
+
+class SimplifiedElementAccessOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<ElementAccess> {};
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const ElementAccess& access = GetParam();
+ const Operator* op = simplified.LoadElement(access);
+
+ EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
+ EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(access, ElementAccessOf(op));
+
+ EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
+ SimplifiedOperatorBuilder simplified(zone());
+ const ElementAccess& access = GetParam();
+ const Operator* op = simplified.StoreElement(access);
+
+ EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
+ EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(access, ElementAccessOf(op));
+
+ EXPECT_EQ(4, OperatorProperties::GetValueInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+ EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
+ SimplifiedElementAccessOperatorTest,
+ ::testing::ValuesIn(kElementAccesses));
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
new file mode 100644
index 0000000000..642ffc7bc1
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -0,0 +1,178 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, BaseTaggedness base_taggedness) {
+ switch (base_taggedness) {
+ case kUntaggedBase:
+ return os << "untagged base";
+ case kTaggedBase:
+ return os << "tagged base";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+ return lhs.base_is_tagged == rhs.base_is_tagged &&
+ lhs.header_size == rhs.header_size && lhs.type == rhs.type &&
+ lhs.machine_type == rhs.machine_type;
+}
+
+
+bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+OStream& operator<<(OStream& os, ElementAccess const& access) {
+ os << "[" << access.base_is_tagged << ", " << access.header_size << ", ";
+ access.type->PrintTo(os);
+ os << ", " << access.machine_type << "]";
+ return os;
+}
+
+
+const FieldAccess& FieldAccessOf(const Operator* op) {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->opcode() == IrOpcode::kLoadField ||
+ op->opcode() == IrOpcode::kStoreField);
+ return OpParameter<FieldAccess>(op);
+}
+
+
+const ElementAccess& ElementAccessOf(const Operator* op) {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->opcode() == IrOpcode::kLoadElement ||
+ op->opcode() == IrOpcode::kStoreElement);
+ return OpParameter<ElementAccess>(op);
+}
+
+
+// Specialization for static parameters of type {FieldAccess}.
+template <>
+struct StaticParameterTraits<FieldAccess> {
+ static OStream& PrintTo(OStream& os, const FieldAccess& val) {
+ return os << val.offset;
+ }
+ static int HashCode(const FieldAccess& val) {
+ return (val.offset < 16) | (val.machine_type & 0xffff);
+ }
+ static bool Equals(const FieldAccess& lhs, const FieldAccess& rhs) {
+ return lhs.base_is_tagged == rhs.base_is_tagged &&
+ lhs.offset == rhs.offset && lhs.machine_type == rhs.machine_type &&
+ lhs.type->Is(rhs.type);
+ }
+};
+
+
+// Specialization for static parameters of type {ElementAccess}.
+template <>
+struct StaticParameterTraits<ElementAccess> {
+ static OStream& PrintTo(OStream& os, const ElementAccess& access) {
+ return os << access;
+ }
+ static int HashCode(const ElementAccess& access) {
+ return (access.header_size < 16) | (access.machine_type & 0xffff);
+ }
+ static bool Equals(const ElementAccess& lhs, const ElementAccess& rhs) {
+ return lhs.base_is_tagged == rhs.base_is_tagged &&
+ lhs.header_size == rhs.header_size &&
+ lhs.machine_type == rhs.machine_type && lhs.type->Is(rhs.type);
+ }
+};
+
+
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1) \
+ V(BooleanToNumber, Operator::kNoProperties, 1) \
+ V(NumberEqual, Operator::kCommutative, 2) \
+ V(NumberLessThan, Operator::kNoProperties, 2) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(NumberAdd, Operator::kCommutative, 2) \
+ V(NumberSubtract, Operator::kNoProperties, 2) \
+ V(NumberMultiply, Operator::kCommutative, 2) \
+ V(NumberDivide, Operator::kNoProperties, 2) \
+ V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberToInt32, Operator::kNoProperties, 1) \
+ V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(StringEqual, Operator::kCommutative, 2) \
+ V(StringLessThan, Operator::kNoProperties, 2) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(StringAdd, Operator::kNoProperties, 2) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeBoolToBit, Operator::kNoProperties, 1) \
+ V(ChangeBitToBool, Operator::kNoProperties, 1)
+
+
+#define ACCESS_OP_LIST(V) \
+ V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1) \
+ V(StoreField, FieldAccess, Operator::kNoRead, 2, 0) \
+ V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 1) \
+ V(StoreElement, ElementAccess, Operator::kNoRead, 4, 0)
+
+
+struct SimplifiedOperatorBuilderImpl FINAL {
+#define PURE(Name, properties, input_count) \
+ struct Name##Operator FINAL : public SimpleOperator { \
+ Name##Operator() \
+ : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
+ input_count, 1, #Name) {} \
+ }; \
+ Name##Operator k##Name;
+ PURE_OP_LIST(PURE)
+#undef PURE
+};
+
+
+static base::LazyInstance<SimplifiedOperatorBuilderImpl>::type kImpl =
+ LAZY_INSTANCE_INITIALIZER;
+
+
+SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
+ : impl_(kImpl.Get()), zone_(zone) {}
+
+
+#define PURE(Name, properties, input_count) \
+ const Operator* SimplifiedOperatorBuilder::Name() { return &impl_.k##Name; }
+PURE_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
+ // TODO(titzer): What about the type parameter?
+ return new (zone()) SimpleOperator(IrOpcode::kReferenceEqual,
+ Operator::kCommutative | Operator::kPure,
+ 2, 1, "ReferenceEqual");
+}
+
+
+#define ACCESS(Name, Type, properties, input_count, output_count) \
+ const Operator* SimplifiedOperatorBuilder::Name(const Type& access) { \
+ return new (zone()) \
+ Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties, \
+ input_count, output_count, #Name, access); \
+ }
+ACCESS_OP_LIST(ACCESS)
+#undef ACCESS
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 9cf08c3704..32f0e8b1b7 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -5,16 +5,31 @@
#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_
#define V8_COMPILER_SIMPLIFIED_OPERATOR_H_
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/opcodes.h"
-#include "src/zone.h"
+#include "src/compiler/machine-type.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+template <class>
+class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
+class Zone;
+
+
namespace compiler {
+// Forward declarations.
+class Operator;
+struct SimplifiedOperatorBuilderImpl;
+
+
enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+OStream& operator<<(OStream&, BaseTaggedness);
+
// An access descriptor for loads/stores of fixed structures like field
// accesses of heap objects. Accesses from either tagged or untagged base
// pointers are supported; untagging is done automatically during lowering.
@@ -23,7 +38,7 @@ struct FieldAccess {
int offset; // offset of the field, without tag.
Handle<Name> name; // debugging only.
Type* type; // type of the field.
- MachineType representation; // machine representation of field.
+ MachineType machine_type; // machine type of the field.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -37,61 +52,23 @@ struct ElementAccess {
BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
int header_size; // size of the header, without tag.
Type* type; // type of the element.
- MachineType representation; // machine representation of element.
+ MachineType machine_type; // machine type of the element.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
+bool operator==(ElementAccess const& lhs, ElementAccess const& rhs);
+bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs);
-// If the accessed object is not a heap object, add this to the header_size.
-static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
+OStream& operator<<(OStream&, ElementAccess const&);
-// Specialization for static parameters of type {FieldAccess}.
-template <>
-struct StaticParameterTraits<const FieldAccess> {
- static OStream& PrintTo(OStream& os, const FieldAccess& val) { // NOLINT
- return os << val.offset;
- }
- static int HashCode(const FieldAccess& val) {
- return (val.offset < 16) | (val.representation & 0xffff);
- }
- static bool Equals(const FieldAccess& a, const FieldAccess& b) {
- return a.base_is_tagged == b.base_is_tagged && a.offset == b.offset &&
- a.representation == b.representation && a.type->Is(b.type);
- }
-};
-
-
-// Specialization for static parameters of type {ElementAccess}.
-template <>
-struct StaticParameterTraits<const ElementAccess> {
- static OStream& PrintTo(OStream& os, const ElementAccess& val) { // NOLINT
- return os << val.header_size;
- }
- static int HashCode(const ElementAccess& val) {
- return (val.header_size < 16) | (val.representation & 0xffff);
- }
- static bool Equals(const ElementAccess& a, const ElementAccess& b) {
- return a.base_is_tagged == b.base_is_tagged &&
- a.header_size == b.header_size &&
- a.representation == b.representation && a.type->Is(b.type);
- }
-};
-
-
-inline const FieldAccess FieldAccessOf(Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kLoadField ||
- op->opcode() == IrOpcode::kStoreField);
- return static_cast<Operator1<FieldAccess>*>(op)->parameter();
-}
+// If the accessed object is not a heap object, add this to the header_size.
+static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
-inline const ElementAccess ElementAccessOf(Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kLoadElement ||
- op->opcode() == IrOpcode::kStoreElement);
- return static_cast<Operator1<ElementAccess>*>(op)->parameter();
-}
+const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
@@ -116,74 +93,60 @@ inline const ElementAccess ElementAccessOf(Operator* op) {
// - Bool: a tagged pointer to either the canonical JS #false or
// the canonical JS #true object
// - Bit: an untagged integer 0 or 1, but word-sized
-class SimplifiedOperatorBuilder {
+class SimplifiedOperatorBuilder FINAL {
public:
- explicit inline SimplifiedOperatorBuilder(Zone* zone) : zone_(zone) {}
-
-#define SIMPLE(name, properties, inputs, outputs) \
- return new (zone_) \
- SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
-
-#define OP1(name, ptype, pname, properties, inputs, outputs) \
- return new (zone_) \
- Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
- inputs, outputs, #name, pname)
-
-#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
-#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
-
- Operator* BooleanNot() const { UNOP(BooleanNot); }
-
- Operator* NumberEqual() const { BINOP(NumberEqual); }
- Operator* NumberLessThan() const { BINOP(NumberLessThan); }
- Operator* NumberLessThanOrEqual() const { BINOP(NumberLessThanOrEqual); }
- Operator* NumberAdd() const { BINOP(NumberAdd); }
- Operator* NumberSubtract() const { BINOP(NumberSubtract); }
- Operator* NumberMultiply() const { BINOP(NumberMultiply); }
- Operator* NumberDivide() const { BINOP(NumberDivide); }
- Operator* NumberModulus() const { BINOP(NumberModulus); }
- Operator* NumberToInt32() const { UNOP(NumberToInt32); }
- Operator* NumberToUint32() const { UNOP(NumberToUint32); }
-
- Operator* ReferenceEqual(Type* type) const { BINOP(ReferenceEqual); }
-
- Operator* StringEqual() const { BINOP(StringEqual); }
- Operator* StringLessThan() const { BINOP(StringLessThan); }
- Operator* StringLessThanOrEqual() const { BINOP(StringLessThanOrEqual); }
- Operator* StringAdd() const { BINOP(StringAdd); }
-
- Operator* ChangeTaggedToInt32() const { UNOP(ChangeTaggedToInt32); }
- Operator* ChangeTaggedToUint32() const { UNOP(ChangeTaggedToUint32); }
- Operator* ChangeTaggedToFloat64() const { UNOP(ChangeTaggedToFloat64); }
- Operator* ChangeInt32ToTagged() const { UNOP(ChangeInt32ToTagged); }
- Operator* ChangeUint32ToTagged() const { UNOP(ChangeUint32ToTagged); }
- Operator* ChangeFloat64ToTagged() const { UNOP(ChangeFloat64ToTagged); }
- Operator* ChangeBoolToBit() const { UNOP(ChangeBoolToBit); }
- Operator* ChangeBitToBool() const { UNOP(ChangeBitToBool); }
-
- Operator* LoadField(const FieldAccess& access) const {
- OP1(LoadField, FieldAccess, access, Operator::kNoWrite, 1, 1);
- }
- Operator* StoreField(const FieldAccess& access) const {
- OP1(StoreField, FieldAccess, access, Operator::kNoRead, 2, 0);
- }
- Operator* LoadElement(const ElementAccess& access) const {
- OP1(LoadElement, ElementAccess, access, Operator::kNoWrite, 2, 1);
- }
- Operator* StoreElement(const ElementAccess& access) const {
- OP1(StoreElement, ElementAccess, access, Operator::kNoRead, 3, 0);
- }
-
-#undef BINOP
-#undef UNOP
-#undef OP1
-#undef SIMPLE
+ explicit SimplifiedOperatorBuilder(Zone* zone);
+
+ const Operator* BooleanNot();
+ const Operator* BooleanToNumber();
+
+ const Operator* NumberEqual();
+ const Operator* NumberLessThan();
+ const Operator* NumberLessThanOrEqual();
+ const Operator* NumberAdd();
+ const Operator* NumberSubtract();
+ const Operator* NumberMultiply();
+ const Operator* NumberDivide();
+ const Operator* NumberModulus();
+ const Operator* NumberToInt32();
+ const Operator* NumberToUint32();
+
+ const Operator* ReferenceEqual(Type* type);
+
+ const Operator* StringEqual();
+ const Operator* StringLessThan();
+ const Operator* StringLessThanOrEqual();
+ const Operator* StringAdd();
+
+ const Operator* ChangeTaggedToInt32();
+ const Operator* ChangeTaggedToUint32();
+ const Operator* ChangeTaggedToFloat64();
+ const Operator* ChangeInt32ToTagged();
+ const Operator* ChangeUint32ToTagged();
+ const Operator* ChangeFloat64ToTagged();
+ const Operator* ChangeBoolToBit();
+ const Operator* ChangeBitToBool();
+
+ const Operator* LoadField(const FieldAccess&);
+ const Operator* StoreField(const FieldAccess&);
+
+ // load-element [base + index], length
+ const Operator* LoadElement(ElementAccess const&);
+
+ // store-element [base + index], length, value
+ const Operator* StoreElement(ElementAccess const&);
private:
- Zone* zone_;
+ Zone* zone() const { return zone_; }
+
+ const SimplifiedOperatorBuilderImpl& impl_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorBuilder);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_SIMPLIFIED_OPERATOR_H_
diff --git a/deps/v8/src/compiler/source-position.h b/deps/v8/src/compiler/source-position.h
index b81582fd99..778f067161 100644
--- a/deps/v8/src/compiler/source-position.h
+++ b/deps/v8/src/compiler/source-position.h
@@ -14,7 +14,7 @@ namespace compiler {
// Encapsulates encoding and decoding of sources positions from which Nodes
// originated.
-class SourcePosition V8_FINAL {
+class SourcePosition FINAL {
public:
explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
@@ -43,7 +43,7 @@ inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
}
-class SourcePositionTable V8_FINAL {
+class SourcePositionTable FINAL {
public:
class Scope {
public:
diff --git a/deps/v8/src/compiler/structured-machine-assembler.cc b/deps/v8/src/compiler/structured-machine-assembler.cc
deleted file mode 100644
index dbf2134a1f..0000000000
--- a/deps/v8/src/compiler/structured-machine-assembler.cc
+++ /dev/null
@@ -1,664 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/pipeline.h"
-#include "src/compiler/scheduler.h"
-#include "src/compiler/structured-machine-assembler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-Node* Variable::Get() const { return smasm_->GetVariable(offset_); }
-
-
-void Variable::Set(Node* value) const { smasm_->SetVariable(offset_, value); }
-
-
-StructuredMachineAssembler::StructuredMachineAssembler(
- Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
- MachineType word)
- : GraphBuilder(graph),
- schedule_(new (zone()) Schedule(zone())),
- machine_(zone(), word),
- common_(zone()),
- call_descriptor_builder_(call_descriptor_builder),
- parameters_(NULL),
- current_environment_(new (zone())
- Environment(zone(), schedule()->entry(), false)),
- number_of_variables_(0) {
- Node* s = graph->NewNode(common_.Start(parameter_count()));
- graph->SetStart(s);
- if (parameter_count() == 0) return;
- parameters_ = zone()->NewArray<Node*>(parameter_count());
- for (int i = 0; i < parameter_count(); ++i) {
- parameters_[i] = NewNode(common()->Parameter(i), graph->start());
- }
-}
-
-
-Schedule* StructuredMachineAssembler::Export() {
- // Compute the correct codegen order.
- DCHECK(schedule_->rpo_order()->empty());
- Scheduler::ComputeSpecialRPO(schedule_);
- // Invalidate MachineAssembler.
- Schedule* schedule = schedule_;
- schedule_ = NULL;
- return schedule;
-}
-
-
-Node* StructuredMachineAssembler::Parameter(int index) {
- DCHECK(0 <= index && index < parameter_count());
- return parameters_[index];
-}
-
-
-Node* StructuredMachineAssembler::MakeNode(Operator* op, int input_count,
- Node** inputs) {
- DCHECK(ScheduleValid());
- DCHECK(current_environment_ != NULL);
- Node* node = graph()->NewNode(op, input_count, inputs);
- BasicBlock* block = NULL;
- switch (op->opcode()) {
- case IrOpcode::kParameter:
- case IrOpcode::kInt32Constant:
- case IrOpcode::kInt64Constant:
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kExternalConstant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kHeapConstant:
- // Parameters and constants must be in start.
- block = schedule()->start();
- break;
- default:
- // Verify all leaf nodes handled above.
- DCHECK((op->OutputCount() == 0) == (op->opcode() == IrOpcode::kStore));
- block = current_environment_->block_;
- break;
- }
- if (block != NULL) {
- schedule()->AddNode(block, node);
- }
- return node;
-}
-
-
-Variable StructuredMachineAssembler::NewVariable(Node* initial_value) {
- CHECK(initial_value != NULL);
- int offset = number_of_variables_++;
- // Extend current environment to correct number of values.
- NodeVector* variables = CurrentVars();
- size_t to_add = number_of_variables_ - variables->size();
- if (to_add != 0) {
- variables->reserve(number_of_variables_);
- variables->insert(variables->end(), to_add, NULL);
- }
- variables->at(offset) = initial_value;
- return Variable(this, offset);
-}
-
-
-Node* StructuredMachineAssembler::GetVariable(int offset) {
- DCHECK(ScheduleValid());
- return VariableAt(current_environment_, offset);
-}
-
-
-void StructuredMachineAssembler::SetVariable(int offset, Node* value) {
- DCHECK(ScheduleValid());
- Node*& ref = VariableAt(current_environment_, offset);
- ref = value;
-}
-
-
-Node*& StructuredMachineAssembler::VariableAt(Environment* environment,
- int32_t offset) {
- // Variable used out of scope.
- CHECK(static_cast<size_t>(offset) < environment->variables_.size());
- Node*& value = environment->variables_.at(offset);
- CHECK(value != NULL); // Variable used out of scope.
- return value;
-}
-
-
-void StructuredMachineAssembler::Return(Node* value) {
- BasicBlock* block = current_environment_->block_;
- if (block != NULL) {
- schedule()->AddReturn(block, value);
- }
- CopyCurrentAsDead();
-}
-
-
-void StructuredMachineAssembler::CopyCurrentAsDead() {
- DCHECK(current_environment_ != NULL);
- bool is_dead = current_environment_->is_dead_;
- current_environment_->is_dead_ = true;
- Environment* next = Copy(current_environment_);
- current_environment_->is_dead_ = is_dead;
- current_environment_ = next;
-}
-
-
-StructuredMachineAssembler::Environment* StructuredMachineAssembler::Copy(
- Environment* env, int truncate_at) {
- Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_);
- if (!new_env->is_dead_) {
- new_env->block_ = schedule()->NewBasicBlock();
- }
- new_env->variables_.reserve(truncate_at);
- NodeVectorIter end = env->variables_.end();
- DCHECK(truncate_at <= static_cast<int>(env->variables_.size()));
- end -= static_cast<int>(env->variables_.size()) - truncate_at;
- new_env->variables_.insert(new_env->variables_.begin(),
- env->variables_.begin(), end);
- return new_env;
-}
-
-
-StructuredMachineAssembler::Environment*
-StructuredMachineAssembler::CopyForLoopHeader(Environment* env) {
- Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_);
- if (!new_env->is_dead_) {
- new_env->block_ = schedule()->NewBasicBlock();
- }
- new_env->variables_.reserve(env->variables_.size());
- for (NodeVectorIter i = env->variables_.begin(); i != env->variables_.end();
- ++i) {
- Node* phi = NULL;
- if (*i != NULL) {
- phi = graph()->NewNode(common()->Phi(1), *i);
- if (new_env->block_ != NULL) {
- schedule()->AddNode(new_env->block_, phi);
- }
- }
- new_env->variables_.push_back(phi);
- }
- return new_env;
-}
-
-
-void StructuredMachineAssembler::MergeBackEdgesToLoopHeader(
- Environment* header, EnvironmentVector* environments) {
- // Only merge as many variables are were declared before this loop.
- int n = static_cast<int>(header->variables_.size());
- // TODO(dcarney): invert loop order and extend phis once.
- for (EnvironmentVector::iterator i = environments->begin();
- i != environments->end(); ++i) {
- Environment* from = *i;
- if (from->is_dead_) continue;
- AddGoto(from, header);
- for (int i = 0; i < n; ++i) {
- Node* phi = header->variables_[i];
- if (phi == NULL) continue;
- phi->set_op(common()->Phi(phi->InputCount() + 1));
- phi->AppendInput(zone(), VariableAt(from, i));
- }
- }
-}
-
-
-void StructuredMachineAssembler::Merge(EnvironmentVector* environments,
- int truncate_at) {
- DCHECK(current_environment_ == NULL || current_environment_->is_dead_);
- Environment* next = new (zone()) Environment(zone(), NULL, false);
- current_environment_ = next;
- size_t n_vars = number_of_variables_;
- NodeVector& vars = next->variables_;
- vars.reserve(n_vars);
- Node** scratch = NULL;
- size_t n_envs = environments->size();
- Environment** live_environments = reinterpret_cast<Environment**>(
- alloca(sizeof(environments->at(0)) * n_envs));
- size_t n_live = 0;
- for (size_t i = 0; i < n_envs; i++) {
- if (environments->at(i)->is_dead_) continue;
- live_environments[n_live++] = environments->at(i);
- }
- n_envs = n_live;
- if (n_live == 0) next->is_dead_ = true;
- if (!next->is_dead_) {
- next->block_ = schedule()->NewBasicBlock();
- }
- for (size_t j = 0; j < n_vars; ++j) {
- Node* resolved = NULL;
- // Find first non equal variable.
- size_t i = 0;
- for (; i < n_envs; i++) {
- DCHECK(live_environments[i]->variables_.size() <= n_vars);
- Node* val = NULL;
- if (j < static_cast<size_t>(truncate_at)) {
- val = live_environments[i]->variables_.at(j);
- // TODO(dcarney): record start position at time of split.
- // all variables after this should not be NULL.
- if (val != NULL) {
- val = VariableAt(live_environments[i], static_cast<int>(j));
- }
- }
- if (val == resolved) continue;
- if (i != 0) break;
- resolved = val;
- }
- // Have to generate a phi.
- if (i < n_envs) {
- // All values thus far uninitialized, variable used out of scope.
- CHECK(resolved != NULL);
- // Init scratch buffer.
- if (scratch == NULL) {
- scratch = static_cast<Node**>(alloca(n_envs * sizeof(resolved)));
- }
- for (size_t k = 0; k < i; k++) {
- scratch[k] = resolved;
- }
- for (; i < n_envs; i++) {
- scratch[i] = live_environments[i]->variables_[j];
- }
- resolved = graph()->NewNode(common()->Phi(static_cast<int>(n_envs)),
- static_cast<int>(n_envs), scratch);
- if (next->block_ != NULL) {
- schedule()->AddNode(next->block_, resolved);
- }
- }
- vars.push_back(resolved);
- }
-}
-
-
-void StructuredMachineAssembler::AddGoto(Environment* from, Environment* to) {
- if (to->is_dead_) {
- DCHECK(from->is_dead_);
- return;
- }
- DCHECK(!from->is_dead_);
- schedule()->AddGoto(from->block_, to->block_);
-}
-
-
-// TODO(dcarney): add pass before rpo to schedule to compute these.
-BasicBlock* StructuredMachineAssembler::TrampolineFor(BasicBlock* block) {
- BasicBlock* trampoline = schedule()->NewBasicBlock();
- schedule()->AddGoto(trampoline, block);
- return trampoline;
-}
-
-
-void StructuredMachineAssembler::AddBranch(Environment* environment,
- Node* condition,
- Environment* true_val,
- Environment* false_val) {
- DCHECK(environment->is_dead_ == true_val->is_dead_);
- DCHECK(environment->is_dead_ == false_val->is_dead_);
- if (true_val->block_ == false_val->block_) {
- if (environment->is_dead_) return;
- AddGoto(environment, true_val);
- return;
- }
- Node* branch = graph()->NewNode(common()->Branch(), condition);
- if (environment->is_dead_) return;
- BasicBlock* true_block = TrampolineFor(true_val->block_);
- BasicBlock* false_block = TrampolineFor(false_val->block_);
- schedule()->AddBranch(environment->block_, branch, true_block, false_block);
-}
-
-
-StructuredMachineAssembler::Environment::Environment(Zone* zone,
- BasicBlock* block,
- bool is_dead)
- : block_(block),
- variables_(NodeVector::allocator_type(zone)),
- is_dead_(is_dead) {}
-
-
-StructuredMachineAssembler::IfBuilder::IfBuilder(
- StructuredMachineAssembler* smasm)
- : smasm_(smasm),
- if_clauses_(IfClauses::allocator_type(smasm_->zone())),
- pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) {
- DCHECK(smasm_->current_environment_ != NULL);
- PushNewIfClause();
- DCHECK(!IsDone());
-}
-
-
-StructuredMachineAssembler::IfBuilder&
-StructuredMachineAssembler::IfBuilder::If() {
- DCHECK(smasm_->current_environment_ != NULL);
- IfClause* clause = CurrentClause();
- if (clause->then_environment_ != NULL || clause->else_environment_ != NULL) {
- PushNewIfClause();
- }
- return *this;
-}
-
-
-StructuredMachineAssembler::IfBuilder&
-StructuredMachineAssembler::IfBuilder::If(Node* condition) {
- If();
- IfClause* clause = CurrentClause();
- // Store branch for future resolution.
- UnresolvedBranch* next = new (smasm_->zone())
- UnresolvedBranch(smasm_->current_environment_, condition, NULL);
- if (clause->unresolved_list_tail_ != NULL) {
- clause->unresolved_list_tail_->next_ = next;
- }
- clause->unresolved_list_tail_ = next;
- // Push onto merge queues.
- clause->pending_else_merges_.push_back(next);
- clause->pending_then_merges_.push_back(next);
- smasm_->current_environment_ = NULL;
- return *this;
-}
-
-
-void StructuredMachineAssembler::IfBuilder::And() {
- CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionTerm);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::Or() {
- CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionTerm);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::Then() {
- CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionDone);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::Else() {
- AddCurrentToPending();
- CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionDone);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::AddCurrentToPending() {
- if (smasm_->current_environment_ != NULL &&
- !smasm_->current_environment_->is_dead_) {
- pending_exit_merges_.push_back(smasm_->current_environment_);
- }
- smasm_->current_environment_ = NULL;
-}
-
-
-void StructuredMachineAssembler::IfBuilder::PushNewIfClause() {
- int curr_size =
- static_cast<int>(smasm_->current_environment_->variables_.size());
- IfClause* clause = new (smasm_->zone()) IfClause(smasm_->zone(), curr_size);
- if_clauses_.push_back(clause);
-}
-
-
-StructuredMachineAssembler::IfBuilder::IfClause::IfClause(
- Zone* zone, int initial_environment_size)
- : unresolved_list_tail_(NULL),
- initial_environment_size_(initial_environment_size),
- expression_states_(ExpressionStates::allocator_type(zone)),
- pending_then_merges_(PendingMergeStack::allocator_type(zone)),
- pending_else_merges_(PendingMergeStack::allocator_type(zone)),
- then_environment_(NULL),
- else_environment_(NULL) {
- PushNewExpressionState();
-}
-
-
-StructuredMachineAssembler::IfBuilder::PendingMergeStackRange
-StructuredMachineAssembler::IfBuilder::IfClause::ComputeRelevantMerges(
- CombineType combine_type) {
- DCHECK(!expression_states_.empty());
- PendingMergeStack* stack;
- int start;
- if (combine_type == kCombineThen) {
- stack = &pending_then_merges_;
- start = expression_states_.back().pending_then_size_;
- } else {
- DCHECK(combine_type == kCombineElse);
- stack = &pending_else_merges_;
- start = expression_states_.back().pending_else_size_;
- }
- PendingMergeStackRange data;
- data.merge_stack_ = stack;
- data.start_ = start;
- data.size_ = static_cast<int>(stack->size()) - start;
- return data;
-}
-
-
-void StructuredMachineAssembler::IfBuilder::IfClause::ResolvePendingMerges(
- StructuredMachineAssembler* smasm, CombineType combine_type,
- ResolutionType resolution_type) {
- DCHECK(smasm->current_environment_ == NULL);
- PendingMergeStackRange data = ComputeRelevantMerges(combine_type);
- DCHECK_EQ(data.merge_stack_->back(), unresolved_list_tail_);
- DCHECK(data.size_ > 0);
- // TODO(dcarney): assert no new variables created during expression building.
- int truncate_at = initial_environment_size_;
- if (data.size_ == 1) {
- // Just copy environment in common case.
- smasm->current_environment_ =
- smasm->Copy(unresolved_list_tail_->environment_, truncate_at);
- } else {
- EnvironmentVector environments(
- EnvironmentVector::allocator_type(smasm->zone()));
- environments.reserve(data.size_);
- CopyEnvironments(data, &environments);
- DCHECK(static_cast<int>(environments.size()) == data.size_);
- smasm->Merge(&environments, truncate_at);
- }
- Environment* then_environment = then_environment_;
- Environment* else_environment = NULL;
- if (resolution_type == kExpressionDone) {
- DCHECK(expression_states_.size() == 1);
- // Set the current then_ or else_environment_ to the new merged environment.
- if (combine_type == kCombineThen) {
- DCHECK(then_environment_ == NULL && else_environment_ == NULL);
- this->then_environment_ = smasm->current_environment_;
- } else {
- DCHECK(else_environment_ == NULL);
- this->else_environment_ = smasm->current_environment_;
- }
- } else {
- DCHECK(resolution_type == kExpressionTerm);
- DCHECK(then_environment_ == NULL && else_environment_ == NULL);
- }
- if (combine_type == kCombineThen) {
- then_environment = smasm->current_environment_;
- } else {
- DCHECK(combine_type == kCombineElse);
- else_environment = smasm->current_environment_;
- }
- // Finalize branches and clear the pending stack.
- FinalizeBranches(smasm, data, combine_type, then_environment,
- else_environment);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::IfClause::CopyEnvironments(
- const PendingMergeStackRange& data, EnvironmentVector* environments) {
- PendingMergeStack::iterator i = data.merge_stack_->begin();
- PendingMergeStack::iterator end = data.merge_stack_->end();
- for (i += data.start_; i != end; ++i) {
- environments->push_back((*i)->environment_);
- }
-}
-
-
-void StructuredMachineAssembler::IfBuilder::IfClause::PushNewExpressionState() {
- ExpressionState next;
- next.pending_then_size_ = static_cast<int>(pending_then_merges_.size());
- next.pending_else_size_ = static_cast<int>(pending_else_merges_.size());
- expression_states_.push_back(next);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::IfClause::PopExpressionState() {
- expression_states_.pop_back();
- DCHECK(!expression_states_.empty());
-}
-
-
-void StructuredMachineAssembler::IfBuilder::IfClause::FinalizeBranches(
- StructuredMachineAssembler* smasm, const PendingMergeStackRange& data,
- CombineType combine_type, Environment* const then_environment,
- Environment* const else_environment) {
- DCHECK(unresolved_list_tail_ != NULL);
- DCHECK(smasm->current_environment_ != NULL);
- if (data.size_ == 0) return;
- PendingMergeStack::iterator curr = data.merge_stack_->begin();
- PendingMergeStack::iterator end = data.merge_stack_->end();
- // Finalize everything but the head first,
- // in the order the branches enter the merge block.
- end -= 1;
- Environment* true_val = then_environment;
- Environment* false_val = else_environment;
- Environment** next;
- if (combine_type == kCombineThen) {
- next = &false_val;
- } else {
- DCHECK(combine_type == kCombineElse);
- next = &true_val;
- }
- for (curr += data.start_; curr != end; ++curr) {
- UnresolvedBranch* branch = *curr;
- *next = branch->next_->environment_;
- smasm->AddBranch(branch->environment_, branch->condition_, true_val,
- false_val);
- }
- DCHECK(curr + 1 == data.merge_stack_->end());
- // Now finalize the tail if possible.
- if (then_environment != NULL && else_environment != NULL) {
- UnresolvedBranch* branch = *curr;
- smasm->AddBranch(branch->environment_, branch->condition_, then_environment,
- else_environment);
- }
- // Clear the merge stack.
- PendingMergeStack::iterator begin = data.merge_stack_->begin();
- begin += data.start_;
- data.merge_stack_->erase(begin, data.merge_stack_->end());
- DCHECK_EQ(static_cast<int>(data.merge_stack_->size()), data.start_);
-}
-
-
-void StructuredMachineAssembler::IfBuilder::End() {
- DCHECK(!IsDone());
- AddCurrentToPending();
- size_t current_pending = pending_exit_merges_.size();
- // All unresolved branch edges are now set to pending.
- for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end();
- ++i) {
- IfClause* clause = *i;
- DCHECK(clause->expression_states_.size() == 1);
- PendingMergeStackRange data;
- // Copy then environments.
- data = clause->ComputeRelevantMerges(kCombineThen);
- clause->CopyEnvironments(data, &pending_exit_merges_);
- Environment* head = NULL;
- // Will resolve the head node in the else_merge
- if (data.size_ > 0 && clause->then_environment_ == NULL &&
- clause->else_environment_ == NULL) {
- head = pending_exit_merges_.back();
- pending_exit_merges_.pop_back();
- }
- // Copy else environments.
- data = clause->ComputeRelevantMerges(kCombineElse);
- clause->CopyEnvironments(data, &pending_exit_merges_);
- if (head != NULL) {
- // Must have data to merge, or else head will never get a branch.
- DCHECK(data.size_ != 0);
- pending_exit_merges_.push_back(head);
- }
- }
- smasm_->Merge(&pending_exit_merges_,
- if_clauses_[0]->initial_environment_size_);
- // Anything initally pending jumps into the new environment.
- for (size_t i = 0; i < current_pending; ++i) {
- smasm_->AddGoto(pending_exit_merges_[i], smasm_->current_environment_);
- }
- // Resolve all branches.
- for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end();
- ++i) {
- IfClause* clause = *i;
- // Must finalize all environments, so ensure they are set correctly.
- Environment* then_environment = clause->then_environment_;
- if (then_environment == NULL) {
- then_environment = smasm_->current_environment_;
- }
- Environment* else_environment = clause->else_environment_;
- PendingMergeStackRange data;
- // Finalize then environments.
- data = clause->ComputeRelevantMerges(kCombineThen);
- clause->FinalizeBranches(smasm_, data, kCombineThen, then_environment,
- else_environment);
- // Finalize else environments.
- // Now set the else environment so head is finalized for edge case above.
- if (else_environment == NULL) {
- else_environment = smasm_->current_environment_;
- }
- data = clause->ComputeRelevantMerges(kCombineElse);
- clause->FinalizeBranches(smasm_, data, kCombineElse, then_environment,
- else_environment);
- }
- // Future accesses to this builder should crash immediately.
- pending_exit_merges_.clear();
- if_clauses_.clear();
- DCHECK(IsDone());
-}
-
-
-StructuredMachineAssembler::LoopBuilder::LoopBuilder(
- StructuredMachineAssembler* smasm)
- : smasm_(smasm),
- header_environment_(NULL),
- pending_header_merges_(EnvironmentVector::allocator_type(smasm_->zone())),
- pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) {
- DCHECK(smasm_->current_environment_ != NULL);
- // Create header environment.
- header_environment_ = smasm_->CopyForLoopHeader(smasm_->current_environment_);
- smasm_->AddGoto(smasm_->current_environment_, header_environment_);
- // Create body environment.
- Environment* body = smasm_->Copy(header_environment_);
- smasm_->AddGoto(header_environment_, body);
- smasm_->current_environment_ = body;
- DCHECK(!IsDone());
-}
-
-
-void StructuredMachineAssembler::LoopBuilder::Continue() {
- DCHECK(!IsDone());
- pending_header_merges_.push_back(smasm_->current_environment_);
- smasm_->CopyCurrentAsDead();
-}
-
-
-void StructuredMachineAssembler::LoopBuilder::Break() {
- DCHECK(!IsDone());
- pending_exit_merges_.push_back(smasm_->current_environment_);
- smasm_->CopyCurrentAsDead();
-}
-
-
-void StructuredMachineAssembler::LoopBuilder::End() {
- DCHECK(!IsDone());
- if (smasm_->current_environment_ != NULL) {
- Continue();
- }
- // Do loop header merges.
- smasm_->MergeBackEdgesToLoopHeader(header_environment_,
- &pending_header_merges_);
- int initial_size = static_cast<int>(header_environment_->variables_.size());
- // Do loop exit merges, truncating loop variables away.
- smasm_->Merge(&pending_exit_merges_, initial_size);
- for (EnvironmentVector::iterator i = pending_exit_merges_.begin();
- i != pending_exit_merges_.end(); ++i) {
- smasm_->AddGoto(*i, smasm_->current_environment_);
- }
- pending_header_merges_.clear();
- pending_exit_merges_.clear();
- header_environment_ = NULL;
- DCHECK(IsDone());
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/structured-machine-assembler.h b/deps/v8/src/compiler/structured-machine-assembler.h
deleted file mode 100644
index a6cb8ca88b..0000000000
--- a/deps/v8/src/compiler/structured-machine-assembler.h
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
-#define V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
-
-#include "src/v8.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-builder.h"
-#include "src/compiler/machine-node-factory.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/operator.h"
-
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class BasicBlock;
-class Schedule;
-class StructuredMachineAssembler;
-
-
-class Variable : public ZoneObject {
- public:
- Node* Get() const;
- void Set(Node* value) const;
-
- private:
- Variable(StructuredMachineAssembler* smasm, int offset)
- : smasm_(smasm), offset_(offset) {}
-
- friend class StructuredMachineAssembler;
- friend class StructuredMachineAssemblerFriend;
- StructuredMachineAssembler* const smasm_;
- const int offset_;
-};
-
-
-class StructuredMachineAssembler
- : public GraphBuilder,
- public MachineNodeFactory<StructuredMachineAssembler> {
- public:
- class Environment : public ZoneObject {
- public:
- Environment(Zone* zone, BasicBlock* block, bool is_dead_);
-
- private:
- BasicBlock* block_;
- NodeVector variables_;
- bool is_dead_;
- friend class StructuredMachineAssembler;
- DISALLOW_COPY_AND_ASSIGN(Environment);
- };
-
- class IfBuilder;
- friend class IfBuilder;
- class LoopBuilder;
- friend class LoopBuilder;
-
- StructuredMachineAssembler(
- Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
- MachineType word = MachineOperatorBuilder::pointer_rep());
- virtual ~StructuredMachineAssembler() {}
-
- Isolate* isolate() const { return zone()->isolate(); }
- Zone* zone() const { return graph()->zone(); }
- MachineOperatorBuilder* machine() { return &machine_; }
- CommonOperatorBuilder* common() { return &common_; }
- CallDescriptor* call_descriptor() const {
- return call_descriptor_builder_->BuildCallDescriptor(zone());
- }
- int parameter_count() const {
- return call_descriptor_builder_->parameter_count();
- }
- const MachineType* parameter_types() const {
- return call_descriptor_builder_->parameter_types();
- }
-
- // Parameters.
- Node* Parameter(int index);
- // Variables.
- Variable NewVariable(Node* initial_value);
- // Control flow.
- void Return(Node* value);
-
- // MachineAssembler is invalid after export.
- Schedule* Export();
-
- protected:
- virtual Node* MakeNode(Operator* op, int input_count, Node** inputs);
-
- Schedule* schedule() {
- DCHECK(ScheduleValid());
- return schedule_;
- }
-
- private:
- bool ScheduleValid() { return schedule_ != NULL; }
-
- typedef std::vector<Environment*, zone_allocator<Environment*> >
- EnvironmentVector;
-
- NodeVector* CurrentVars() { return &current_environment_->variables_; }
- Node*& VariableAt(Environment* environment, int offset);
- Node* GetVariable(int offset);
- void SetVariable(int offset, Node* value);
-
- void AddBranch(Environment* environment, Node* condition,
- Environment* true_val, Environment* false_val);
- void AddGoto(Environment* from, Environment* to);
- BasicBlock* TrampolineFor(BasicBlock* block);
-
- void CopyCurrentAsDead();
- Environment* Copy(Environment* environment) {
- return Copy(environment, static_cast<int>(environment->variables_.size()));
- }
- Environment* Copy(Environment* environment, int truncate_at);
- void Merge(EnvironmentVector* environments, int truncate_at);
- Environment* CopyForLoopHeader(Environment* environment);
- void MergeBackEdgesToLoopHeader(Environment* header,
- EnvironmentVector* environments);
-
- typedef std::vector<MachineType, zone_allocator<MachineType> >
- RepresentationVector;
-
- Schedule* schedule_;
- MachineOperatorBuilder machine_;
- CommonOperatorBuilder common_;
- MachineCallDescriptorBuilder* call_descriptor_builder_;
- Node** parameters_;
- Environment* current_environment_;
- int number_of_variables_;
-
- friend class Variable;
- // For testing only.
- friend class StructuredMachineAssemblerFriend;
- DISALLOW_COPY_AND_ASSIGN(StructuredMachineAssembler);
-};
-
-// IfBuilder constructs of nested if-else expressions which more or less follow
-// C semantics. Foe example:
-//
-// if (x) {do_x} else if (y) {do_y} else {do_z}
-//
-// would look like this:
-//
-// IfBuilder b;
-// b.If(x).Then();
-// do_x
-// b.Else();
-// b.If().Then();
-// do_y
-// b.Else();
-// do_z
-// b.End();
-//
-// Then() and Else() can be skipped, representing an empty block in C.
-// Combinations like If(x).Then().If(x).Then() are legitimate, but
-// Else().Else() is not. That is, once you've nested an If(), you can't get to a
-// higher level If() branch.
-// TODO(dcarney): describe expressions once the api is finalized.
-class StructuredMachineAssembler::IfBuilder {
- public:
- explicit IfBuilder(StructuredMachineAssembler* smasm);
- ~IfBuilder() {
- if (!IsDone()) End();
- }
-
- IfBuilder& If(); // TODO(dcarney): this should take an expression.
- IfBuilder& If(Node* condition);
- void Then();
- void Else();
- void End();
-
- // The next 4 functions are exposed for expression support.
- // They will be private once I have a nice expression api.
- void And();
- void Or();
- IfBuilder& OpenParen() {
- DCHECK(smasm_->current_environment_ != NULL);
- CurrentClause()->PushNewExpressionState();
- return *this;
- }
- IfBuilder& CloseParen() {
- DCHECK(smasm_->current_environment_ == NULL);
- CurrentClause()->PopExpressionState();
- return *this;
- }
-
- private:
- // UnresolvedBranch represents the chain of environments created while
- // generating an expression. At this point, a branch Node
- // cannot be created, as the target environments of the branch are not yet
- // available, so everything required to create the branch Node is
- // stored in this structure until the target environments are resolved.
- struct UnresolvedBranch : public ZoneObject {
- UnresolvedBranch(Environment* environment, Node* condition,
- UnresolvedBranch* next)
- : environment_(environment), condition_(condition), next_(next) {}
- // environment_ will eventually be terminated by a branch on condition_.
- Environment* environment_;
- Node* condition_;
- // next_ is the next link in the UnresolvedBranch chain, and will be
- // either the true or false branch jumped to from environment_.
- UnresolvedBranch* next_;
- };
-
- struct ExpressionState {
- int pending_then_size_;
- int pending_else_size_;
- };
-
- typedef std::vector<ExpressionState, zone_allocator<ExpressionState> >
- ExpressionStates;
- typedef std::vector<UnresolvedBranch*, zone_allocator<UnresolvedBranch*> >
- PendingMergeStack;
- struct IfClause;
- typedef std::vector<IfClause*, zone_allocator<IfClause*> > IfClauses;
-
- struct PendingMergeStackRange {
- PendingMergeStack* merge_stack_;
- int start_;
- int size_;
- };
-
- enum CombineType { kCombineThen, kCombineElse };
- enum ResolutionType { kExpressionTerm, kExpressionDone };
-
- // IfClause represents one level of if-then-else nesting plus the associated
- // expression.
- // A call to If() triggers creation of a new nesting level after expression
- // creation is complete - ie Then() or Else() has been called.
- struct IfClause : public ZoneObject {
- IfClause(Zone* zone, int initial_environment_size);
- void CopyEnvironments(const PendingMergeStackRange& data,
- EnvironmentVector* environments);
- void ResolvePendingMerges(StructuredMachineAssembler* smasm,
- CombineType combine_type,
- ResolutionType resolution_type);
- PendingMergeStackRange ComputeRelevantMerges(CombineType combine_type);
- void FinalizeBranches(StructuredMachineAssembler* smasm,
- const PendingMergeStackRange& offset_data,
- CombineType combine_type,
- Environment* then_environment,
- Environment* else_environment);
- void PushNewExpressionState();
- void PopExpressionState();
-
- // Each invocation of And or Or creates a new UnresolvedBranch.
- // These form a singly-linked list, of which we only need to keep track of
- // the tail. On creation of an UnresolvedBranch, pending_then_merges_ and
- // pending_else_merges_ each push a copy, which are removed on merges to the
- // respective environment.
- UnresolvedBranch* unresolved_list_tail_;
- int initial_environment_size_;
- // expression_states_ keeps track of the state of pending_*_merges_,
- // pushing and popping the lengths of these on
- // OpenParend() and CloseParend() respectively.
- ExpressionStates expression_states_;
- PendingMergeStack pending_then_merges_;
- PendingMergeStack pending_else_merges_;
- // then_environment_ is created iff there is a call to Then(), otherwise
- // branches which would merge to it merge to the exit environment instead.
- // Likewise for else_environment_.
- Environment* then_environment_;
- Environment* else_environment_;
- };
-
- IfClause* CurrentClause() { return if_clauses_.back(); }
- void AddCurrentToPending();
- void PushNewIfClause();
- bool IsDone() { return if_clauses_.empty(); }
-
- StructuredMachineAssembler* smasm_;
- IfClauses if_clauses_;
- EnvironmentVector pending_exit_merges_;
- DISALLOW_COPY_AND_ASSIGN(IfBuilder);
-};
-
-
-class StructuredMachineAssembler::LoopBuilder {
- public:
- explicit LoopBuilder(StructuredMachineAssembler* smasm);
- ~LoopBuilder() {
- if (!IsDone()) End();
- }
-
- void Break();
- void Continue();
- void End();
-
- private:
- friend class StructuredMachineAssembler;
- bool IsDone() { return header_environment_ == NULL; }
-
- StructuredMachineAssembler* smasm_;
- Environment* header_environment_;
- EnvironmentVector pending_header_merges_;
- EnvironmentVector pending_exit_merges_;
- DISALLOW_COPY_AND_ASSIGN(LoopBuilder);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 2aa18699dd..b6349c9e01 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -15,16 +15,26 @@ namespace internal {
namespace compiler {
Typer::Typer(Zone* zone) : zone_(zone) {
- Type* number = Type::Number(zone);
- Type* signed32 = Type::Signed32(zone);
- Type* unsigned32 = Type::Unsigned32(zone);
- Type* integral32 = Type::Integral32(zone);
- Type* object = Type::Object(zone);
- Type* undefined = Type::Undefined(zone);
+ Factory* f = zone->isolate()->factory();
+
+ Type* number = Type::Number();
+ Type* signed32 = Type::Signed32();
+ Type* unsigned32 = Type::Unsigned32();
+ Type* integral32 = Type::Integral32();
+ Type* object = Type::Object();
+ Type* undefined = Type::Undefined();
+ Type* weakint = Type::Union(
+ Type::Range(f->NewNumber(-V8_INFINITY), f->NewNumber(+V8_INFINITY), zone),
+ Type::Union(Type::NaN(), Type::MinusZero(), zone), zone);
+
number_fun0_ = Type::Function(number, zone);
number_fun1_ = Type::Function(number, number, zone);
number_fun2_ = Type::Function(number, number, number, zone);
+ weakint_fun1_ = Type::Function(weakint, number, zone);
imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
+ random_fun_ = Type::Function(Type::Union(
+ Type::UnsignedSmall(), Type::OtherNumber(), zone), zone);
+
#define NATIVE_TYPE(sem, rep) \
Type::Intersect(Type::sem(zone), Type::rep(zone), zone)
@@ -70,21 +80,25 @@ class Typer::Visitor : public NullNodeVisitor {
Bounds TypeNode(Node* node) {
switch (node->opcode()) {
#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
+ DECLARE_CASE(Start)
VALUE_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
- CONTROL_OP_LIST(DECLARE_CASE)
+ DECLARE_CASE(End)
+ INNER_CONTROL_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
- return Bounds(Type::None(zone()));
+ UNREACHABLE();
+ return Bounds();
}
Type* TypeConstant(Handle<Object> value);
protected:
#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+ DECLARE_METHOD(Start)
VALUE_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
@@ -95,8 +109,9 @@ class Typer::Visitor : public NullNodeVisitor {
Type* ContextType(Node* node) {
Bounds result =
NodeProperties::GetBounds(NodeProperties::GetContextInput(node));
- DCHECK(result.upper->Is(Type::Internal()));
- DCHECK(result.lower->Equals(result.upper));
+ DCHECK(result.upper->Maybe(Type::Internal()));
+ // TODO(rossberg): More precisely, instead of the above assertion, we should
+ // back-propagate the constraint that it has to be a subtype of Internal.
return result.upper;
}
@@ -114,28 +129,27 @@ class Typer::RunVisitor : public Typer::Visitor {
public:
RunVisitor(Typer* typer, MaybeHandle<Context> context)
: Visitor(typer, context),
- phis(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
-
- GenericGraphVisit::Control Pre(Node* node) {
- return NodeProperties::IsControl(node)
- && node->opcode() != IrOpcode::kEnd
- && node->opcode() != IrOpcode::kMerge
- && node->opcode() != IrOpcode::kReturn
- ? GenericGraphVisit::SKIP : GenericGraphVisit::CONTINUE;
- }
+ redo(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
GenericGraphVisit::Control Post(Node* node) {
- Bounds bounds = TypeNode(node);
- if (node->opcode() == IrOpcode::kPhi) {
- // Remember phis for least fixpoint iteration.
- phis.insert(node);
- } else {
+ if (OperatorProperties::HasValueOutput(node->op())) {
+ Bounds bounds = TypeNode(node);
NodeProperties::SetBounds(node, bounds);
+ // Remember incompletely typed nodes for least fixpoint iteration.
+ int arity = OperatorProperties::GetValueInputCount(node->op());
+ for (int i = 0; i < arity; ++i) {
+ // TODO(rossberg): change once IsTyped is available.
+ // if (!NodeProperties::IsTyped(NodeProperties::GetValueInput(node, i)))
+ if (OperandType(node, i).upper->Is(Type::None())) {
+ redo.insert(node);
+ break;
+ }
+ }
}
return GenericGraphVisit::CONTINUE;
}
- NodeSet phis;
+ NodeSet redo;
};
@@ -145,13 +159,17 @@ class Typer::NarrowVisitor : public Typer::Visitor {
: Visitor(typer, context) {}
GenericGraphVisit::Control Pre(Node* node) {
- Bounds previous = NodeProperties::GetBounds(node);
- Bounds bounds = TypeNode(node);
- NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
- DCHECK(bounds.Narrows(previous));
- // Stop when nothing changed (but allow reentry in case it does later).
- return previous.Narrows(bounds)
- ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+ if (OperatorProperties::HasValueOutput(node->op())) {
+ Bounds previous = NodeProperties::GetBounds(node);
+ Bounds bounds = TypeNode(node);
+ NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
+ DCHECK(bounds.Narrows(previous));
+ // Stop when nothing changed (but allow re-entry in case it does later).
+ return previous.Narrows(bounds)
+ ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+ } else {
+ return GenericGraphVisit::SKIP;
+ }
}
GenericGraphVisit::Control Post(Node* node) {
@@ -166,14 +184,18 @@ class Typer::WidenVisitor : public Typer::Visitor {
: Visitor(typer, context) {}
GenericGraphVisit::Control Pre(Node* node) {
- Bounds previous = NodeProperties::GetBounds(node);
- Bounds bounds = TypeNode(node);
- DCHECK(previous.lower->Is(bounds.lower));
- DCHECK(previous.upper->Is(bounds.upper));
- NodeProperties::SetBounds(node, bounds); // TODO(rossberg): Either?
- // Stop when nothing changed (but allow reentry in case it does later).
- return bounds.Narrows(previous)
- ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+ if (OperatorProperties::HasValueOutput(node->op())) {
+ Bounds previous = NodeProperties::GetBounds(node);
+ Bounds bounds = TypeNode(node);
+ DCHECK(previous.lower->Is(bounds.lower));
+ DCHECK(previous.upper->Is(bounds.upper));
+ NodeProperties::SetBounds(node, bounds); // TODO(rossberg): Either?
+ // Stop when nothing changed (but allow re-entry in case it does later).
+ return bounds.Narrows(previous)
+ ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+ } else {
+ return GenericGraphVisit::SKIP;
+ }
}
GenericGraphVisit::Control Post(Node* node) {
@@ -186,7 +208,7 @@ void Typer::Run(Graph* graph, MaybeHandle<Context> context) {
RunVisitor typing(this, context);
graph->VisitNodeInputsFromEnd(&typing);
// Find least fixpoint.
- for (NodeSetIter i = typing.phis.begin(); i != typing.phis.end(); ++i) {
+ for (NodeSetIter i = typing.redo.begin(); i != typing.redo.end(); ++i) {
Widen(graph, *i, context);
}
}
@@ -205,13 +227,26 @@ void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) {
void Typer::Init(Node* node) {
- Visitor typing(this, MaybeHandle<Context>());
- Bounds bounds = typing.TypeNode(node);
- NodeProperties::SetBounds(node, bounds);
+ if (OperatorProperties::HasValueOutput(node->op())) {
+ Visitor typing(this, MaybeHandle<Context>());
+ Bounds bounds = typing.TypeNode(node);
+ NodeProperties::SetBounds(node, bounds);
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+
+
+// Control operators.
+
+Bounds Typer::Visitor::TypeStart(Node* node) {
+ return Bounds(Type::Internal(zone()));
}
// Common operators.
+
Bounds Typer::Visitor::TypeParameter(Node* node) {
return Bounds::Unbounded(zone());
}
@@ -219,31 +254,37 @@ Bounds Typer::Visitor::TypeParameter(Node* node) {
Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
// TODO(titzer): only call Type::Of() if the type is not already known.
- return Bounds(Type::Of(ValueOf<int32_t>(node->op()), zone()));
+ return Bounds(Type::Of(OpParameter<int32_t>(node), zone()));
}
Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
// TODO(titzer): only call Type::Of() if the type is not already known.
return Bounds(
- Type::Of(static_cast<double>(ValueOf<int64_t>(node->op())), zone()));
+ Type::Of(static_cast<double>(OpParameter<int64_t>(node)), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
+ // TODO(titzer): only call Type::Of() if the type is not already known.
+ return Bounds(Type::Of(OpParameter<float>(node), zone()));
}
Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
// TODO(titzer): only call Type::Of() if the type is not already known.
- return Bounds(Type::Of(ValueOf<double>(node->op()), zone()));
+ return Bounds(Type::Of(OpParameter<double>(node), zone()));
}
Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
// TODO(titzer): only call Type::Of() if the type is not already known.
- return Bounds(Type::Of(ValueOf<double>(node->op()), zone()));
+ return Bounds(Type::Of(OpParameter<double>(node), zone()));
}
Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
- return Bounds(TypeConstant(ValueOf<Handle<Object> >(node->op())));
+ return Bounds(TypeConstant(OpParameter<Unique<Object> >(node).handle()));
}
@@ -263,17 +304,36 @@ Bounds Typer::Visitor::TypePhi(Node* node) {
Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
- return Bounds(Type::None(zone()));
+ UNREACHABLE();
+ return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeControlEffect(Node* node) {
+ UNREACHABLE();
+ return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeValueEffect(Node* node) {
+ UNREACHABLE();
+ return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeFinish(Node* node) {
+ return OperandType(node, 0);
}
Bounds Typer::Visitor::TypeFrameState(Node* node) {
- return Bounds(Type::None(zone()));
+ // TODO(rossberg): Ideally FrameState wouldn't have a value output.
+ return Bounds(Type::Internal(zone()));
}
Bounds Typer::Visitor::TypeStateValues(Node* node) {
- return Bounds(Type::None(zone()));
+ return Bounds(Type::Internal(zone()));
}
@@ -418,7 +478,7 @@ Bounds Typer::Visitor::TypeJSToName(Node* node) {
Bounds Typer::Visitor::TypeJSToObject(Node* node) {
- return Bounds(Type::None(zone()), Type::Object(zone()));
+ return Bounds(Type::None(zone()), Type::Receiver(zone()));
}
@@ -452,12 +512,14 @@ Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
- return Bounds(Type::None(zone()));
+ UNREACHABLE();
+ return Bounds();
}
Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
- return Bounds(Type::None(zone()));
+ UNREACHABLE();
+ return Bounds();
}
@@ -480,8 +542,10 @@ Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
Bounds outer = OperandType(node, 0);
- DCHECK(outer.upper->Is(Type::Internal()));
- DCHECK(outer.lower->Equals(outer.upper));
+ DCHECK(outer.upper->Maybe(Type::Internal()));
+ // TODO(rossberg): More precisely, instead of the above assertion, we should
+ // back-propagate the constraint that it has to be a subtype of Internal.
+
ContextAccess access = OpParameter<ContextAccess>(node);
Type* context_type = outer.upper;
MaybeHandle<Context> context;
@@ -499,7 +563,7 @@ Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
if (context_type->IsConstant()) {
context = Handle<Context>::cast(context_type->AsConstant()->Value());
}
- } else {
+ } else if (!context.is_null()) {
context = handle(context.ToHandleChecked()->previous(), isolate());
}
}
@@ -515,7 +579,8 @@ Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
- return Bounds(Type::None(zone()));
+ UNREACHABLE();
+ return Bounds();
}
@@ -595,6 +660,11 @@ Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
}
+Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
+ return Bounds(Type::Number(zone()));
+}
+
+
Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
return Bounds(Type::Boolean(zone()));
}
@@ -736,12 +806,14 @@ Bounds Typer::Visitor::TypeLoadElement(Node* node) {
Bounds Typer::Visitor::TypeStoreField(Node* node) {
- return Bounds(Type::None());
+ UNREACHABLE();
+ return Bounds();
}
Bounds Typer::Visitor::TypeStoreElement(Node* node) {
- return Bounds(Type::None());
+ UNREACHABLE();
+ return Bounds();
}
@@ -772,13 +844,13 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
} else if (*value == native->math_atan2_fun()) {
return typer_->number_fun2_;
} else if (*value == native->math_ceil_fun()) {
- return typer_->number_fun1_;
+ return typer_->weakint_fun1_;
} else if (*value == native->math_cos_fun()) {
return typer_->number_fun1_;
} else if (*value == native->math_exp_fun()) {
return typer_->number_fun1_;
} else if (*value == native->math_floor_fun()) {
- return typer_->number_fun1_;
+ return typer_->weakint_fun1_;
} else if (*value == native->math_imul_fun()) {
return typer_->imul_fun_;
} else if (*value == native->math_log_fun()) {
@@ -786,9 +858,9 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
} else if (*value == native->math_pow_fun()) {
return typer_->number_fun2_;
} else if (*value == native->math_random_fun()) {
- return typer_->number_fun0_;
+ return typer_->random_fun_;
} else if (*value == native->math_round_fun()) {
- return typer_->number_fun1_;
+ return typer_->weakint_fun1_;
} else if (*value == native->math_sin_fun()) {
return typer_->number_fun1_;
} else if (*value == native->math_sqrt_fun()) {
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 2957e4b4a8..2adbab5ff7 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -39,7 +39,9 @@ class Typer {
Type* number_fun0_;
Type* number_fun1_;
Type* number_fun2_;
+ Type* weakint_fun1_;
Type* imul_fun_;
+ Type* random_fun_;
Type* array_buffer_fun_;
Type* int8_array_fun_;
Type* int16_array_fun_;
diff --git a/deps/v8/src/compiler/value-numbering-reducer-unittest.cc b/deps/v8/src/compiler/value-numbering-reducer-unittest.cc
new file mode 100644
index 0000000000..8db6458031
--- /dev/null
+++ b/deps/v8/src/compiler/value-numbering-reducer-unittest.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/graph.h"
+#include "src/compiler/value-numbering-reducer.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const SimpleOperator kOp0(0, Operator::kNoProperties, 0, 1, "op0");
+const SimpleOperator kOp1(1, Operator::kNoProperties, 1, 1, "op1");
+
+} // namespace
+
+
+class ValueNumberingReducerTest : public TestWithZone {
+ public:
+ ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
+
+ protected:
+ Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
+
+ Graph* graph() { return &graph_; }
+
+ private:
+ Graph graph_;
+ ValueNumberingReducer reducer_;
+};
+
+
+TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
+ Node* na = graph()->NewNode(&kOp0);
+ Node* nb = graph()->NewNode(&kOp0);
+ Node* n1 = graph()->NewNode(&kOp0, na);
+ Node* n2 = graph()->NewNode(&kOp0, nb);
+ EXPECT_FALSE(Reduce(n1).Changed());
+ EXPECT_FALSE(Reduce(n2).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, DeadNodesAreNeverReturned) {
+ Node* n0 = graph()->NewNode(&kOp0);
+ Node* n1 = graph()->NewNode(&kOp1, n0);
+ EXPECT_FALSE(Reduce(n1).Changed());
+ n1->Kill();
+ EXPECT_FALSE(Reduce(graph()->NewNode(&kOp1, n0)).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
+ static const size_t kMaxInputCount = 16;
+ Node* inputs[kMaxInputCount];
+ for (size_t i = 0; i < arraysize(inputs); ++i) {
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(
+ std::numeric_limits<Operator::Opcode>::max() - i);
+ inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
+ opcode, Operator::kNoProperties, 0, 1, "Operator"));
+ }
+ TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+ const SimpleOperator op1(static_cast<Operator::Opcode>(input_count),
+ Operator::kNoProperties,
+ static_cast<int>(input_count), 1, "op");
+ Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+ Reduction r1 = Reduce(n1);
+ EXPECT_FALSE(r1.Changed());
+
+ const SimpleOperator op2(static_cast<Operator::Opcode>(input_count),
+ Operator::kNoProperties,
+ static_cast<int>(input_count), 1, "op");
+ Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
+ Reduction r2 = Reduce(n2);
+ EXPECT_TRUE(r2.Changed());
+ EXPECT_EQ(n1, r2.replacement());
+ }
+}
+
+
+TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
+ static const size_t kMaxInputCount = 16;
+ Node* inputs[kMaxInputCount];
+ for (size_t i = 0; i < arraysize(inputs); ++i) {
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(
+ std::numeric_limits<Operator::Opcode>::max() - i);
+ inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
+ opcode, Operator::kNoProperties, 0, 1, "Operator"));
+ }
+ TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+ const SimpleOperator op1(1, Operator::kNoProperties,
+ static_cast<int>(input_count), 1, "op1");
+ Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+ Reduction r = Reduce(n);
+ EXPECT_FALSE(r.Changed());
+
+ r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(n, r.replacement());
+
+ r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(n, r.replacement());
+ }
+}
+
+
+TEST_F(ValueNumberingReducerTest, WontReplaceNodeWithItself) {
+ Node* n = graph()->NewNode(&kOp0);
+ EXPECT_FALSE(Reduce(n).Changed());
+ EXPECT_FALSE(Reduce(n).Changed());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
new file mode 100644
index 0000000000..595a4f3017
--- /dev/null
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/value-numbering-reducer.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+size_t HashCode(Node* node) { return node->op()->HashCode(); }
+
+
+bool Equals(Node* a, Node* b) {
+ DCHECK_NOT_NULL(a);
+ DCHECK_NOT_NULL(b);
+ DCHECK_NOT_NULL(a->op());
+ DCHECK_NOT_NULL(b->op());
+ if (!a->op()->Equals(b->op())) return false;
+ if (a->InputCount() != b->InputCount()) return false;
+ for (int j = 0; j < a->InputCount(); ++j) {
+ DCHECK_NOT_NULL(a->InputAt(j));
+ DCHECK_NOT_NULL(b->InputAt(j));
+ if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+ }
+ return true;
+}
+
+} // namespace
+
+
+class ValueNumberingReducer::Entry FINAL : public ZoneObject {
+ public:
+ Entry(Node* node, Entry* next) : node_(node), next_(next) {}
+
+ Node* node() const { return node_; }
+ Entry* next() const { return next_; }
+
+ private:
+ Node* node_;
+ Entry* next_;
+};
+
+
+ValueNumberingReducer::ValueNumberingReducer(Zone* zone) : zone_(zone) {
+ for (size_t i = 0; i < arraysize(buckets_); ++i) {
+ buckets_[i] = NULL;
+ }
+}
+
+
+ValueNumberingReducer::~ValueNumberingReducer() {}
+
+
+Reduction ValueNumberingReducer::Reduce(Node* node) {
+ Entry** head = &buckets_[HashCode(node) % arraysize(buckets_)];
+ for (Entry* entry = *head; entry; entry = entry->next()) {
+ if (entry->node()->IsDead()) continue;
+ if (entry->node() == node) return NoChange();
+ if (Equals(node, entry->node())) {
+ return Replace(entry->node());
+ }
+ }
+ *head = new (zone()) Entry(node, *head);
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
new file mode 100644
index 0000000000..0d67e5dd31
--- /dev/null
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+#define V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ValueNumberingReducer FINAL : public Reducer {
+ public:
+ explicit ValueNumberingReducer(Zone* zone);
+ ~ValueNumberingReducer();
+
+ virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+ Zone* zone() const { return zone_; }
+
+ // TODO(turbofan): We currently use separate chaining with linked lists here,
+ // we may want to replace that with a more sophisticated data structure at
+ // some point in the future.
+ class Entry;
+ Entry* buckets_[117u];
+ Zone* zone_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 97bb762aff..23cec7a809 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -4,6 +4,9 @@
#include "src/compiler/verifier.h"
+#include <deque>
+#include <queue>
+
#include "src/compiler/generic-algorithm.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/generic-node.h"
@@ -14,6 +17,8 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/data-flow.h"
namespace v8 {
namespace internal {
@@ -58,13 +63,27 @@ class Verifier::Visitor : public NullNodeVisitor {
GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
int value_count = OperatorProperties::GetValueInputCount(node->op());
int context_count = OperatorProperties::GetContextInputCount(node->op());
+ int frame_state_count =
+ OperatorProperties::GetFrameStateInputCount(node->op());
int effect_count = OperatorProperties::GetEffectInputCount(node->op());
int control_count = OperatorProperties::GetControlInputCount(node->op());
// Verify number of inputs matches up.
- int input_count = value_count + context_count + effect_count + control_count;
+ int input_count = value_count + context_count + frame_state_count +
+ effect_count + control_count;
CHECK_EQ(input_count, node->InputCount());
+ // Verify that frame state has been inserted for the nodes that need it.
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
+ // kFrameState uses undefined as a sentinel.
+ (node->opcode() == IrOpcode::kFrameState &&
+ frame_state->opcode() == IrOpcode::kHeapConstant));
+ CHECK(IsDefUseChainLinkPresent(frame_state, node));
+ CHECK(IsUseDefChainLinkPresent(frame_state, node));
+ }
+
// Verify all value inputs actually produce a value.
for (int i = 0; i < value_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, i);
@@ -155,7 +174,7 @@ GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
CHECK_EQ(IrOpcode::kStart,
NodeProperties::GetValueInput(node, 0)->opcode());
// Parameter has an input that produces enough values.
- int index = static_cast<Operator1<int>*>(node->op())->parameter();
+ int index = OpParameter<int>(node);
Node* input = NodeProperties::GetValueInput(node, 0);
// Currently, parameter indices start at -1 instead of 0.
CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index + 1);
@@ -186,26 +205,18 @@ GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
OperatorProperties::GetControlInputCount(control->op()));
break;
}
- case IrOpcode::kLazyDeoptimization:
- // TODO(jarin): what are the constraints on these?
- break;
- case IrOpcode::kDeoptimize:
- // TODO(jarin): what are the constraints on these?
- break;
case IrOpcode::kFrameState:
// TODO(jarin): what are the constraints on these?
break;
case IrOpcode::kCall:
// TODO(rossberg): what are the constraints on these?
break;
- case IrOpcode::kContinuation:
- // TODO(jarin): what are the constraints on these?
- break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
- int index = static_cast<Operator1<int>*>(node->op())->parameter();
+ size_t index = OpParameter<size_t>(node);
Node* input = NodeProperties::GetValueInput(node, 0);
- CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index);
+ CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()),
+ static_cast<int>(index));
break;
}
default:
@@ -240,6 +251,205 @@ void Verifier::Run(Graph* graph) {
visitor.reached_from_start.count(*it));
}
}
+
+
+static bool HasDominatingDef(Schedule* schedule, Node* node,
+ BasicBlock* container, BasicBlock* use_block,
+ int use_pos) {
+ BasicBlock* block = use_block;
+ while (true) {
+ while (use_pos >= 0) {
+ if (block->nodes_[use_pos] == node) return true;
+ use_pos--;
+ }
+ block = block->dominator_;
+ if (block == NULL) break;
+ use_pos = static_cast<int>(block->nodes_.size()) - 1;
+ if (node == block->control_input_) return true;
+ }
+ return false;
+}
+
+
+static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
+ Node* node, int use_pos) {
+ for (int j = OperatorProperties::GetValueInputCount(node->op()) - 1; j >= 0;
+ j--) {
+ BasicBlock* use_block = block;
+ if (node->opcode() == IrOpcode::kPhi) {
+ use_block = use_block->PredecessorAt(j);
+ use_pos = static_cast<int>(use_block->nodes_.size()) - 1;
+ }
+ Node* input = node->InputAt(j);
+ if (!HasDominatingDef(schedule, node->InputAt(j), block, use_block,
+ use_pos)) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
+ node->id(), node->op()->mnemonic(), block->id(), j, input->id(),
+ input->op()->mnemonic());
+ }
+ }
+}
+
+
+void ScheduleVerifier::Run(Schedule* schedule) {
+ const int count = schedule->BasicBlockCount();
+ Zone tmp_zone(schedule->zone()->isolate());
+ Zone* zone = &tmp_zone;
+ BasicBlock* start = schedule->start();
+ BasicBlockVector* rpo_order = schedule->rpo_order();
+
+ // Verify the RPO order contains only blocks from this schedule.
+ CHECK_GE(count, static_cast<int>(rpo_order->size()));
+ for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+ ++b) {
+ CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
+ }
+
+ // Verify RPO numbers of blocks.
+ CHECK_EQ(start, rpo_order->at(0)); // Start should be first.
+ for (size_t b = 0; b < rpo_order->size(); b++) {
+ BasicBlock* block = rpo_order->at(b);
+ CHECK_EQ(static_cast<int>(b), block->rpo_number_);
+ BasicBlock* dom = block->dominator_;
+ if (b == 0) {
+ // All blocks except start should have a dominator.
+ CHECK_EQ(NULL, dom);
+ } else {
+ // Check that the immediate dominator appears somewhere before the block.
+ CHECK_NE(NULL, dom);
+ CHECK_LT(dom->rpo_number_, block->rpo_number_);
+ }
+ }
+
+ // Verify that all blocks reachable from start are in the RPO.
+ BoolVector marked(count, false, zone);
+ {
+ ZoneQueue<BasicBlock*> queue(zone);
+ queue.push(start);
+ marked[start->id()] = true;
+ while (!queue.empty()) {
+ BasicBlock* block = queue.front();
+ queue.pop();
+ for (int s = 0; s < block->SuccessorCount(); s++) {
+ BasicBlock* succ = block->SuccessorAt(s);
+ if (!marked[succ->id()]) {
+ marked[succ->id()] = true;
+ queue.push(succ);
+ }
+ }
+ }
+ }
+ // Verify marked blocks are in the RPO.
+ for (int i = 0; i < count; i++) {
+ BasicBlock* block = schedule->GetBlockById(i);
+ if (marked[i]) {
+ CHECK_GE(block->rpo_number_, 0);
+ CHECK_EQ(block, rpo_order->at(block->rpo_number_));
+ }
+ }
+ // Verify RPO blocks are marked.
+ for (size_t b = 0; b < rpo_order->size(); b++) {
+ CHECK(marked[rpo_order->at(b)->id()]);
+ }
+
+ {
+ // Verify the dominance relation.
+ ZoneList<BitVector*> dominators(count, zone);
+ dominators.Initialize(count, zone);
+ dominators.AddBlock(NULL, count, zone);
+
+ // Compute a set of all the nodes that dominate a given node by using
+ // a forward fixpoint. O(n^2).
+ ZoneQueue<BasicBlock*> queue(zone);
+ queue.push(start);
+ dominators[start->id()] = new (zone) BitVector(count, zone);
+ while (!queue.empty()) {
+ BasicBlock* block = queue.front();
+ queue.pop();
+ BitVector* block_doms = dominators[block->id()];
+ BasicBlock* idom = block->dominator_;
+ if (idom != NULL && !block_doms->Contains(idom->id())) {
+ V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
+ block->id(), idom->id());
+ }
+ for (int s = 0; s < block->SuccessorCount(); s++) {
+ BasicBlock* succ = block->SuccessorAt(s);
+ BitVector* succ_doms = dominators[succ->id()];
+
+ if (succ_doms == NULL) {
+ // First time visiting the node. S.doms = B U B.doms
+ succ_doms = new (zone) BitVector(count, zone);
+ succ_doms->CopyFrom(*block_doms);
+ succ_doms->Add(block->id());
+ dominators[succ->id()] = succ_doms;
+ queue.push(succ);
+ } else {
+ // Nth time visiting the successor. S.doms = S.doms ^ (B U B.doms)
+ bool had = succ_doms->Contains(block->id());
+ if (had) succ_doms->Remove(block->id());
+ if (succ_doms->IntersectIsChanged(*block_doms)) queue.push(succ);
+ if (had) succ_doms->Add(block->id());
+ }
+ }
+ }
+
+ // Verify the immediateness of dominators.
+ for (BasicBlockVector::iterator b = rpo_order->begin();
+ b != rpo_order->end(); ++b) {
+ BasicBlock* block = *b;
+ BasicBlock* idom = block->dominator_;
+ if (idom == NULL) continue;
+ BitVector* block_doms = dominators[block->id()];
+
+ for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
+ BasicBlock* dom = schedule->GetBlockById(it.Current());
+ if (dom != idom && !dominators[idom->id()]->Contains(dom->id())) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Block B%d is not immediately dominated by B%d", block->id(),
+ idom->id());
+ }
+ }
+ }
+ }
+
+ // Verify phis are placed in the block of their control input.
+ for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+ ++b) {
+ for (BasicBlock::const_iterator i = (*b)->begin(); i != (*b)->end(); ++i) {
+ Node* phi = *i;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+ // TODO(titzer): Nasty special case. Phis from RawMachineAssembler
+ // schedules don't have control inputs.
+ if (phi->InputCount() >
+ OperatorProperties::GetValueInputCount(phi->op())) {
+ Node* control = NodeProperties::GetControlInput(phi);
+ CHECK(control->opcode() == IrOpcode::kMerge ||
+ control->opcode() == IrOpcode::kLoop);
+ CHECK_EQ((*b), schedule->block(control));
+ }
+ }
+ }
+
+ // Verify that all uses are dominated by their definitions.
+ for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+ ++b) {
+ BasicBlock* block = *b;
+
+ // Check inputs to control for this block.
+ Node* control = block->control_input_;
+ if (control != NULL) {
+ CHECK_EQ(block, schedule->block(control));
+ CheckInputsDominate(schedule, block, control,
+ static_cast<int>(block->nodes_.size()) - 1);
+ }
+ // Check inputs for all nodes in the block.
+ for (size_t i = 0; i < block->nodes_.size(); i++) {
+ Node* node = block->nodes_[i];
+ CheckInputsDominate(schedule, block, node, static_cast<int>(i) - 1);
+ }
+ }
+}
}
}
} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 788c6a5657..b5c028ef30 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -7,12 +7,15 @@
#include "src/v8.h"
-#include "src/compiler/graph.h"
-
namespace v8 {
namespace internal {
namespace compiler {
+class Graph;
+class Schedule;
+
+// Verifies properties of a graph, such as the well-formedness of inputs to
+// each node, etc.
class Verifier {
public:
static void Run(Graph* graph);
@@ -21,6 +24,12 @@ class Verifier {
class Visitor;
DISALLOW_COPY_AND_ASSIGN(Verifier);
};
+
+// Verifies properties of a schedule, such as dominance, phi placement, etc.
+class ScheduleVerifier {
+ public:
+ static void Run(Schedule* schedule);
+};
}
}
} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 9f278ad898..fdf63855d5 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -75,6 +75,11 @@ class X64OperandConverter : public InstructionOperandConverter {
case Constant::kInt64:
immediate.value = constant.ToInt64();
return immediate;
+ case Constant::kFloat32:
+ immediate.type = kImm64Handle;
+ immediate.handle =
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED);
+ return immediate;
case Constant::kFloat64:
immediate.type = kImm64Handle;
immediate.handle =
@@ -99,6 +104,7 @@ class X64OperandConverter : public InstructionOperandConverter {
case Constant::kInt32:
return Immediate(constant.ToInt32());
case Constant::kInt64:
+ case Constant::kFloat32:
case Constant::kFloat64:
case Constant::kExternalReference:
case Constant::kHeapObject:
@@ -139,22 +145,79 @@ class X64OperandConverter : public InstructionOperandConverter {
return result;
}
- Operand MemoryOperand(int* first_input) {
- const int offset = *first_input;
- switch (AddressingModeField::decode(instr_->opcode())) {
- case kMode_MR1I: {
- *first_input += 2;
- Register index = InputRegister(offset + 1);
- return Operand(InputRegister(offset + 0), index, times_1,
- 0); // TODO(dcarney): K != 0
- }
- case kMode_MRI:
- *first_input += 2;
- return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
- default:
+ static int NextOffset(int* offset) {
+ int i = *offset;
+ (*offset)++;
+ return i;
+ }
+
+ static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+ STATIC_ASSERT(0 == static_cast<int>(times_1));
+ STATIC_ASSERT(1 == static_cast<int>(times_2));
+ STATIC_ASSERT(2 == static_cast<int>(times_4));
+ STATIC_ASSERT(3 == static_cast<int>(times_8));
+ int scale = static_cast<int>(mode - one);
+ DCHECK(scale >= 0 && scale < 4);
+ return static_cast<ScaleFactor>(scale);
+ }
+
+ Operand MemoryOperand(int* offset) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ switch (mode) {
+ case kMode_MR: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = 0;
+ return Operand(base, disp);
+ }
+ case kMode_MRI: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, disp);
+ }
+ case kMode_MR1:
+ case kMode_MR2:
+ case kMode_MR4:
+ case kMode_MR8: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+ int32_t disp = 0;
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_M1:
+ case kMode_M2:
+ case kMode_M4:
+ case kMode_M8: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1, mode);
+ int32_t disp = 0;
+ return Operand(index, scale, disp);
+ }
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(index, scale, disp);
+ }
+ case kMode_None:
UNREACHABLE();
return Operand(no_reg, 0);
}
+ UNREACHABLE();
+ return Operand(no_reg, 0);
}
Operand MemoryOperand() {
@@ -204,6 +267,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ Call(Operand(reg, entry));
+ }
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ AddSafepointAndDeopt(instr);
+ break;
+ }
case kArchJmp:
__ jmp(code_->GetLabel(i.InputBlock(0)));
break;
@@ -213,15 +301,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchRet:
AssembleReturn();
break;
- case kArchDeoptimize: {
- int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
-
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
- __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- }
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -258,8 +340,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (input.type == kRegister) {
__ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
} else {
- __ movq(kScratchRegister, input.operand);
- __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+ __ imull(i.OutputRegister(), input.operand, i.InputImmediate(1));
}
} else {
RegisterOrOperand input = i.InputRegisterOrOperand(1);
@@ -276,8 +357,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (input.type == kRegister) {
__ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
} else {
- __ movq(kScratchRegister, input.operand);
- __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+ __ imulq(i.OutputRegister(), input.operand, i.InputImmediate(1));
}
} else {
RegisterOrOperand input = i.InputRegisterOrOperand(1);
@@ -370,62 +450,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Sar:
ASSEMBLE_SHIFT(sarq, 6);
break;
- case kX64Push: {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ pushq(input.reg);
- } else {
- __ pushq(input.operand);
- }
- break;
- }
- case kX64PushI:
- __ pushq(i.InputImmediate(0));
+ case kX64Ror32:
+ ASSEMBLE_SHIFT(rorl, 5);
break;
- case kX64CallCodeObject: {
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ Call(Operand(reg, entry));
- }
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
- AddNopForSmiCodeInlining();
+ case kX64Ror:
+ ASSEMBLE_SHIFT(rorq, 6);
break;
- }
- case kX64CallAddress:
- if (HasImmediateInput(instr, 0)) {
- Immediate64 imm = i.InputImmediate64(0);
- DCHECK_EQ(kImm64Value, imm.type);
- __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
- } else {
- __ call(i.InputRegister(0));
- }
- break;
- case kPopStack: {
- int words = MiscField::decode(instr->opcode());
- __ addq(rsp, Immediate(kPointerSize * words));
- break;
- }
- case kX64CallJSFunction: {
- Register func = i.InputRegister(0);
-
- // TODO(jarin) The load of the context should be separated from the call.
- __ movp(rsi, FieldOperand(func, JSFunction::kContextOffset));
- __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
-
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
- break;
- }
case kSSEFloat64Cmp: {
RegisterOrOperand input = i.InputRegisterOrOperand(1);
if (input.type == kDoubleRegister) {
@@ -478,11 +508,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addq(rsp, Immediate(kDoubleSize));
break;
}
- case kX64Int32ToInt64:
- __ movzxwq(i.OutputRegister(), i.InputRegister(0));
+ case kSSEFloat64Sqrt: {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kDoubleRegister) {
+ __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+ } else {
+ __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+ }
+ break;
+ }
+ case kSSECvtss2sd:
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kX64Int64ToInt32:
- __ Move(i.OutputRegister(), i.InputRegister(0));
+ case kSSECvtsd2ss:
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kSSEFloat64ToInt32: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
@@ -494,8 +533,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kSSEFloat64ToUint32: {
- // TODO(turbofan): X64 SSE cvttsd2siq should support operands.
- __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kDoubleRegister) {
+ __ cvttsd2siq(i.OutputRegister(), input.double_reg);
+ } else {
+ __ cvttsd2siq(i.OutputRegister(), input.operand);
+ }
__ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits.
// TODO(turbofan): generated code should not look at the upper 32 bits
// of the result, but those bits could escape to the outside world.
@@ -515,76 +558,112 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
break;
}
-
- case kSSELoad:
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ case kX64Movsxbl:
+ __ movsxbl(i.OutputRegister(), i.MemoryOperand());
break;
- case kSSEStore: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
- break;
- }
- case kX64LoadWord8:
+ case kX64Movzxbl:
__ movzxbl(i.OutputRegister(), i.MemoryOperand());
break;
- case kX64StoreWord8: {
+ case kX64Movb: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movb(operand, i.InputRegister(index));
+ if (HasImmediateInput(instr, index)) {
+ __ movb(operand, Immediate(i.InputInt8(index)));
+ } else {
+ __ movb(operand, i.InputRegister(index));
+ }
break;
}
- case kX64StoreWord8I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movb(operand, Immediate(i.InputInt8(index)));
+ case kX64Movsxwl:
+ __ movsxwl(i.OutputRegister(), i.MemoryOperand());
break;
- }
- case kX64LoadWord16:
+ case kX64Movzxwl:
__ movzxwl(i.OutputRegister(), i.MemoryOperand());
break;
- case kX64StoreWord16: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movw(operand, i.InputRegister(index));
- break;
- }
- case kX64StoreWord16I: {
+ case kX64Movw: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movw(operand, Immediate(i.InputInt16(index)));
+ if (HasImmediateInput(instr, index)) {
+ __ movw(operand, Immediate(i.InputInt16(index)));
+ } else {
+ __ movw(operand, i.InputRegister(index));
+ }
break;
}
- case kX64LoadWord32:
- __ movl(i.OutputRegister(), i.MemoryOperand());
+ case kX64Movl:
+ if (instr->HasOutput()) {
+ if (instr->addressing_mode() == kMode_None) {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ movl(i.OutputRegister(), input.reg);
+ } else {
+ __ movl(i.OutputRegister(), input.operand);
+ }
+ } else {
+ __ movl(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ movl(operand, i.InputImmediate(index));
+ } else {
+ __ movl(operand, i.InputRegister(index));
+ }
+ }
break;
- case kX64StoreWord32: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movl(operand, i.InputRegister(index));
+ case kX64Movsxlq: {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ movsxlq(i.OutputRegister(), input.reg);
+ } else {
+ __ movsxlq(i.OutputRegister(), input.operand);
+ }
break;
}
- case kX64StoreWord32I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movl(operand, i.InputImmediate(index));
+ case kX64Movq:
+ if (instr->HasOutput()) {
+ __ movq(i.OutputRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ movq(operand, i.InputImmediate(index));
+ } else {
+ __ movq(operand, i.InputRegister(index));
+ }
+ }
break;
- }
- case kX64LoadWord64:
- __ movq(i.OutputRegister(), i.MemoryOperand());
+ case kX64Movss:
+ if (instr->HasOutput()) {
+ __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movss(operand, i.InputDoubleRegister(index));
+ }
break;
- case kX64StoreWord64: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movq(operand, i.InputRegister(index));
+ case kX64Movsd:
+ if (instr->HasOutput()) {
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ }
break;
- }
- case kX64StoreWord64I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movq(operand, i.InputImmediate(index));
+ case kX64Push:
+ if (HasImmediateInput(instr, 0)) {
+ __ pushq(i.InputImmediate(0));
+ } else {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ pushq(input.reg);
+ } else {
+ __ pushq(input.operand);
+ }
+ }
break;
- }
case kX64StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -764,6 +843,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
@@ -841,8 +927,9 @@ void CodeGenerator::AssembleReturn() {
} else {
__ movq(rsp, rbp); // Move stack pointer back to frame pointer.
__ popq(rbp); // Pop caller's frame pointer.
- int pop_count =
- descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
__ ret(pop_count * kPointerSize);
}
}
@@ -877,6 +964,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else if (source->IsConstant()) {
ConstantOperand* constant_source = ConstantOperand::cast(source);
+ Constant src = g.ToConstant(constant_source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
@@ -895,9 +983,20 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsStackSlot()) {
__ movq(g.ToOperand(destination), kScratchRegister);
}
+ } else if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32())));
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movq(dst, kScratchRegister);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ movl(dst, kScratchRegister);
+ }
} else {
- __ movq(kScratchRegister,
- BitCast<uint64_t, double>(g.ToDouble(constant_source)));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ __ movq(kScratchRegister, bit_cast<int64_t>(src.ToFloat64()));
if (destination->IsDoubleRegister()) {
__ movq(g.ToDoubleRegister(destination), kScratchRegister);
} else {
@@ -980,21 +1079,22 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-#undef __
-
-#ifdef DEBUG
-// Checks whether the code between start_pc and end_pc is a no-op.
-bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc) {
- if (start_pc + 1 != end_pc) {
- return false;
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
- return *(code->instruction_start() + start_pc) ==
- v8::internal::Assembler::kNopByte;
+ MarkLazyDeoptSite();
}
-#endif
+#undef __
} // namespace internal
} // namespace compiler
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 8ba33ab10d..336c592aba 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -42,38 +42,33 @@ namespace compiler {
V(X64Shr32) \
V(X64Sar) \
V(X64Sar32) \
- V(X64Push) \
- V(X64PushI) \
- V(X64CallCodeObject) \
- V(X64CallAddress) \
- V(PopStack) \
- V(X64CallJSFunction) \
+ V(X64Ror) \
+ V(X64Ror32) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
- V(X64Int32ToInt64) \
- V(X64Int64ToInt32) \
+ V(SSEFloat64Sqrt) \
+ V(SSECvtss2sd) \
+ V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
- V(SSELoad) \
- V(SSEStore) \
- V(X64LoadWord8) \
- V(X64StoreWord8) \
- V(X64StoreWord8I) \
- V(X64LoadWord16) \
- V(X64StoreWord16) \
- V(X64StoreWord16I) \
- V(X64LoadWord32) \
- V(X64StoreWord32) \
- V(X64StoreWord32I) \
- V(X64LoadWord64) \
- V(X64StoreWord64) \
- V(X64StoreWord64I) \
+ V(X64Movsxbl) \
+ V(X64Movzxbl) \
+ V(X64Movb) \
+ V(X64Movsxwl) \
+ V(X64Movzxwl) \
+ V(X64Movw) \
+ V(X64Movl) \
+ V(X64Movsxlq) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Push) \
V(X64StoreWriteBarrier)
@@ -84,22 +79,30 @@ namespace compiler {
//
// We use the following local notation for addressing modes:
//
-// R = register
-// O = register or stack slot
-// D = double register
-// I = immediate (handle, external, int32)
-// MR = [register]
-// MI = [immediate]
-// MRN = [register + register * N in {1, 2, 4, 8}]
-// MRI = [register + immediate]
-// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+// M = memory operand
+// R = base register
+// N = index register * N for N in {1, 2, 4, 8}
+// I = immediate displacement (int32_t)
+
#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MR) /* [%r1] */ \
- V(MRI) /* [%r1 + K] */ \
- V(MR1I) /* [%r1 + %r2 + K] */ \
+ V(MR) /* [%r1 ] */ \
+ V(MRI) /* [%r1 + K] */ \
+ V(MR1) /* [%r1 + %r2*1 ] */ \
+ V(MR2) /* [%r1 + %r2*2 ] */ \
+ V(MR4) /* [%r1 + %r2*4 ] */ \
+ V(MR8) /* [%r1 + %r2*8 ] */ \
+ V(MR1I) /* [%r1 + %r2*1 + K] */ \
V(MR2I) /* [%r1 + %r2*2 + K] */ \
- V(MR4I) /* [%r1 + %r2*4 + K] */ \
- V(MR8I) /* [%r1 + %r2*8 + K] */
+ V(MR4I) /* [%r1 + %r2*4 + K] */ \
+ V(MR8I) /* [%r1 + %r2*8 + K] */ \
+ V(M1) /* [ %r2*1 ] */ \
+ V(M2) /* [ %r2*2 ] */ \
+ V(M4) /* [ %r2*4 ] */ \
+ V(M8) /* [ %r2*8 ] */ \
+ V(M1I) /* [ %r2*1 + K] */ \
+ V(M2I) /* [ %r2*2 + K] */ \
+ V(M4I) /* [ %r2*4 + K] */ \
+ V(M8I) /* [ %r2*8 + K] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/src/compiler/x64/instruction-selector-x64-unittest.cc
new file mode 100644
index 0000000000..f5545a789d
--- /dev/null
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64-unittest.cc
@@ -0,0 +1,294 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
+ StreamBuilder m(this, kMachInt64, kMachInt32);
+ m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
+ StreamBuilder m(this, kMachUint64, kMachUint32);
+ m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Better left operand for commutative binops
+
+TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* param1 = m.Parameter(0);
+ Node* param2 = m.Parameter(1);
+ Node* add = m.Int32Add(param1, param2);
+ m.Return(m.Int32Add(add, param1));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* param1 = m.Parameter(0);
+ Node* param2 = m.Parameter(1);
+ Node* mul = m.Int32Mul(param1, param2);
+ m.Return(m.Int32Mul(mul, param1));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
+ EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+ OStringStream ost;
+ ost << memacc.type;
+ return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8, kX64Movsxbl, kX64Movb},
+ {kMachUint8, kX64Movzxbl, kX64Movb},
+ {kMachInt16, kX64Movsxwl, kX64Movw},
+ {kMachUint16, kX64Movzxwl, kX64Movw},
+ {kMachInt32, kX64Movl, kX64Movl},
+ {kMachUint32, kX64Movl, kX64Movl},
+ {kMachInt64, kX64Movq, kX64Movq},
+ {kMachUint64, kX64Movq, kX64Movq},
+ {kMachFloat32, kX64Movss, kX64Movss},
+ {kMachFloat64, kX64Movsd, kX64Movsd}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+// -----------------------------------------------------------------------------
+// AddressingMode for loads and stores.
+
+class AddressingModeUnitTest : public InstructionSelectorTest {
+ public:
+ AddressingModeUnitTest() : m(NULL) { Reset(); }
+ ~AddressingModeUnitTest() { delete m; }
+
+ void Run(Node* base, Node* index, AddressingMode mode) {
+ Node* load = m->Load(kMachInt32, base, index);
+ m->Store(kMachInt32, base, index, load);
+ m->Return(m->Int32Constant(0));
+ Stream s = m->Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(mode, s[0]->addressing_mode());
+ EXPECT_EQ(mode, s[1]->addressing_mode());
+ }
+
+ Node* zero;
+ Node* null_ptr;
+ Node* non_zero;
+ Node* base_reg; // opaque value to generate base as register
+ Node* index_reg; // opaque value to generate index as register
+ Node* scales[4];
+ StreamBuilder* m;
+
+ void Reset() {
+ delete m;
+ m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
+ zero = m->Int32Constant(0);
+ null_ptr = m->Int64Constant(0);
+ non_zero = m->Int32Constant(127);
+ base_reg = m->Parameter(0);
+ index_reg = m->Parameter(0);
+
+ scales[0] = m->Int32Constant(1);
+ scales[1] = m->Int32Constant(2);
+ scales[2] = m->Int32Constant(4);
+ scales[3] = m->Int32Constant(8);
+ }
+};
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
+ Node* base = base_reg;
+ Node* index = zero;
+ Run(base, index, kMode_MR);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
+ Node* base = base_reg;
+ Node* index = non_zero;
+ Run(base, index, kMode_MRI);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
+ Node* base = base_reg;
+ Node* index = index_reg;
+ Run(base, index, kMode_MR1);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
+ AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = base_reg;
+ Node* index = m->Int32Mul(index_reg, scales[i]);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
+ Node* base = base_reg;
+ Node* index = m->Int32Add(index_reg, non_zero);
+ Run(base, index, kMode_MR1I);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
+ AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = base_reg;
+ Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
+ Node* base = null_ptr;
+ Node* index = index_reg;
+ Run(base, index, kMode_M1);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
+ AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = null_ptr;
+ Node* index = m->Int32Mul(index_reg, scales[i]);
+ Run(base, index, expected[i]);
+ }
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
+ Node* base = null_ptr;
+ Node* index = m->Int32Add(index_reg, non_zero);
+ Run(base, index, kMode_M1I);
+}
+
+
+TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
+ AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
+ for (size_t i = 0; i < arraysize(scales); ++i) {
+ Reset();
+ Node* base = null_ptr;
+ Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+ Run(base, index, expected[i]);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 965e612e2d..82d1e33fa2 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -10,7 +10,7 @@ namespace internal {
namespace compiler {
// Adds X64-specific methods for generating operands.
-class X64OperandGenerator V8_FINAL : public OperandGenerator {
+class X64OperandGenerator FINAL : public OperandGenerator {
public:
explicit X64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
@@ -20,11 +20,6 @@ class X64OperandGenerator V8_FINAL : public OperandGenerator {
Register::ToAllocationIndex(reg));
}
- InstructionOperand* UseByteRegister(Node* node) {
- // TODO(dcarney): relax constraint.
- return UseFixed(node, rdx);
- }
-
InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
bool CanBeImmediate(Node* node) {
@@ -45,59 +40,140 @@ class X64OperandGenerator V8_FINAL : public OperandGenerator {
case IrOpcode::kHeapConstant: {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
- return !isolate()->heap()->InNewSpace(*value);
+ Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+ return !isolate()->heap()->InNewSpace(*value.handle());
}
default:
return false;
}
}
+
+ bool CanBeBetterLeftOperand(Node* node) const {
+ return !selector()->IsLive(node);
+ }
+};
+
+
+class AddressingModeMatcher {
+ public:
+ AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
+ : base_operand_(NULL),
+ index_operand_(NULL),
+ displacement_operand_(NULL),
+ mode_(kMode_None) {
+ Int32Matcher index_imm(index);
+ if (index_imm.HasValue()) {
+ int32_t value = index_imm.Value();
+ if (value == 0) {
+ mode_ = kMode_MR;
+ } else {
+ mode_ = kMode_MRI;
+ index_operand_ = g->UseImmediate(index);
+ }
+ base_operand_ = g->UseRegister(base);
+ } else {
+ // Compute base operand.
+ Int64Matcher base_imm(base);
+ if (!base_imm.HasValue() || base_imm.Value() != 0) {
+ base_operand_ = g->UseRegister(base);
+ }
+ // Compute index and displacement.
+ IndexAndDisplacementMatcher matcher(index);
+ index_operand_ = g->UseRegister(matcher.index_node());
+ if (matcher.displacement() != 0) {
+ displacement_operand_ = g->TempImmediate(matcher.displacement());
+ }
+ // Compute mode with scale factor one.
+ if (base_operand_ == NULL) {
+ if (displacement_operand_ == NULL) {
+ mode_ = kMode_M1;
+ } else {
+ mode_ = kMode_M1I;
+ }
+ } else {
+ if (displacement_operand_ == NULL) {
+ mode_ = kMode_MR1;
+ } else {
+ mode_ = kMode_MR1I;
+ }
+ }
+ // Adjust mode to actual scale factor.
+ mode_ = GetMode(mode_, matcher.power());
+ }
+ DCHECK_NE(kMode_None, mode_);
+ }
+
+ AddressingMode GetMode(AddressingMode one, int power) {
+ return static_cast<AddressingMode>(static_cast<int>(one) + power);
+ }
+
+ size_t SetInputs(InstructionOperand** inputs) {
+ size_t input_count = 0;
+ // Compute inputs_ and input_count.
+ if (base_operand_ != NULL) {
+ inputs[input_count++] = base_operand_;
+ }
+ if (index_operand_ != NULL) {
+ inputs[input_count++] = index_operand_;
+ }
+ if (displacement_operand_ != NULL) {
+ // Pure displacement mode not supported by x64.
+ DCHECK_NE(input_count, 0);
+ inputs[input_count++] = displacement_operand_;
+ }
+ DCHECK_NE(input_count, 0);
+ return input_count;
+ }
+
+ static const int kMaxInputCount = 3;
+ InstructionOperand* base_operand_;
+ InstructionOperand* index_operand_;
+ InstructionOperand* displacement_operand_;
+ AddressingMode mode_;
};
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = OpParameter<MachineType>(node);
- X64OperandGenerator g(this);
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* output = rep == kMachineFloat64
- ? g.DefineAsDoubleRegister(node)
- : g.DefineAsRegister(node);
ArchOpcode opcode;
+ // TODO(titzer): signed/unsigned small loads
switch (rep) {
- case kMachineFloat64:
- opcode = kSSELoad;
+ case kRepFloat32:
+ opcode = kX64Movss;
break;
- case kMachineWord8:
- opcode = kX64LoadWord8;
+ case kRepFloat64:
+ opcode = kX64Movsd;
break;
- case kMachineWord16:
- opcode = kX64LoadWord16;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
break;
- case kMachineWord32:
- opcode = kX64LoadWord32;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord64:
- opcode = kX64LoadWord64;
+ case kRepWord32:
+ opcode = kX64Movl;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kX64Movq;
break;
default:
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(base)) {
- // load [#base + %index]
- Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
- g.UseRegister(index), g.UseImmediate(base));
- } else if (g.CanBeImmediate(index)) { // load [%base + #index]
- Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
- g.UseRegister(base), g.UseImmediate(index));
- } else { // load [%base + %index + K]
- Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
- g.UseRegister(base), g.UseRegister(index));
- }
- // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+ X64OperandGenerator g(this);
+ AddressingModeMatcher matcher(&g, base, index);
+ InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
+ size_t input_count = matcher.SetInputs(inputs);
+ Emit(code, 1, outputs, input_count, inputs);
}
@@ -108,67 +184,59 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = store_rep.rep;
- if (store_rep.write_barrier_kind == kFullWriteBarrier) {
- DCHECK(rep == kMachineTagged);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
- g.UseFixed(index, rcx), g.UseFixed(value, rdx), ARRAY_SIZE(temps),
+ g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
temps);
return;
}
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
- bool is_immediate = false;
- InstructionOperand* val;
- if (rep == kMachineFloat64) {
- val = g.UseDoubleRegister(value);
- } else {
- is_immediate = g.CanBeImmediate(value);
- if (is_immediate) {
- val = g.UseImmediate(value);
- } else if (rep == kMachineWord8) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
- }
- }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
ArchOpcode opcode;
switch (rep) {
- case kMachineFloat64:
- opcode = kSSEStore;
+ case kRepFloat32:
+ opcode = kX64Movss;
+ break;
+ case kRepFloat64:
+ opcode = kX64Movsd;
break;
- case kMachineWord8:
- opcode = is_immediate ? kX64StoreWord8I : kX64StoreWord8;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kX64Movb;
break;
- case kMachineWord16:
- opcode = is_immediate ? kX64StoreWord16I : kX64StoreWord16;
+ case kRepWord16:
+ opcode = kX64Movw;
break;
- case kMachineWord32:
- opcode = is_immediate ? kX64StoreWord32I : kX64StoreWord32;
+ case kRepWord32:
+ opcode = kX64Movl;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord64:
- opcode = is_immediate ? kX64StoreWord64I : kX64StoreWord64;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kX64Movq;
break;
default:
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(base)) {
- // store [#base + %index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(index), g.UseImmediate(base), val);
- } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), val);
- } else { // store [%base + %index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
- g.UseRegister(base), g.UseRegister(index), val);
+
+ InstructionOperand* val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else {
+ val = g.UseRegister(value);
}
- // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+ AddressingModeMatcher matcher(&g, base, index);
+ InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
+ InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
+ size_t input_count = matcher.SetInputs(inputs);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
@@ -177,20 +245,24 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
- // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
- // this might be the last use and therefore its register can be reused.
- if (g.CanBeImmediate(m.right().node())) {
- inputs[input_count++] = g.Use(m.left().node());
- inputs[input_count++] = g.UseImmediate(m.right().node());
+ if (g.CanBeImmediate(right)) {
+ inputs[input_count++] = g.Use(left);
+ inputs[input_count++] = g.UseImmediate(right);
} else {
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.Use(m.right().node());
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
}
if (cont->IsBranch()) {
@@ -205,8 +277,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_NE(0, input_count);
DCHECK_NE(0, output_count);
- DCHECK_GE(ARRAY_SIZE(inputs), input_count);
- DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
@@ -242,27 +314,25 @@ void InstructionSelector::VisitWord64Or(Node* node) {
}
-template <typename T>
-static void VisitXor(InstructionSelector* selector, Node* node,
- ArchOpcode xor_opcode, ArchOpcode not_opcode) {
- X64OperandGenerator g(selector);
- BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ X64OperandGenerator g(this);
+ Uint32BinopMatcher m(node);
if (m.right().Is(-1)) {
- selector->Emit(not_opcode, g.DefineSameAsFirst(node),
- g.Use(m.left().node()));
+ Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
} else {
- VisitBinop(selector, node, xor_opcode);
+ VisitBinop(this, node, kX64Xor32);
}
}
-void InstructionSelector::VisitWord32Xor(Node* node) {
- VisitXor<int32_t>(this, node, kX64Xor32, kX64Not32);
-}
-
-
void InstructionSelector::VisitWord64Xor(Node* node) {
- VisitXor<int64_t>(this, node, kX64Xor, kX64Not);
+ X64OperandGenerator g(this);
+ Uint64BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ } else {
+ VisitBinop(this, node, kX64Xor);
+ }
}
@@ -348,6 +418,16 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
}
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitWord32Shift(this, node, kX64Ror32);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitWord64Shift(this, node, kX64Ror);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kX64Add32);
}
@@ -358,43 +438,41 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
-template <typename T>
-static void VisitSub(InstructionSelector* selector, Node* node,
- ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
- X64OperandGenerator g(selector);
- BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ X64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
if (m.left().Is(0)) {
- selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
- g.Use(m.right().node()));
+ Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
} else {
- VisitBinop(selector, node, sub_opcode);
+ VisitBinop(this, node, kX64Sub32);
}
}
-void InstructionSelector::VisitInt32Sub(Node* node) {
- VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
-}
-
-
void InstructionSelector::VisitInt64Sub(Node* node) {
- VisitSub<int64_t>(this, node, kX64Sub, kX64Neg);
+ X64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+ } else {
+ VisitBinop(this, node, kX64Sub);
+ }
}
static void VisitMul(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
g.UseImmediate(right));
- } else if (g.CanBeImmediate(left)) {
- selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
- g.UseImmediate(left));
} else {
- // TODO(turbofan): select better left operand.
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.Use(right));
}
@@ -417,7 +495,7 @@ static void VisitDiv(InstructionSelector* selector, Node* node,
InstructionOperand* temps[] = {g.TempRegister(rdx)};
selector->Emit(
opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
- g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
@@ -447,7 +525,7 @@ static void VisitMod(InstructionSelector* selector, Node* node,
InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
selector->Emit(
opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
- g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
@@ -471,17 +549,23 @@ void InstructionSelector::VisitInt64UMod(Node* node) {
}
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ // TODO(turbofan): X64 SSE conversions should take an operand.
+ Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
- g.Use(node->InputAt(0)));
+ Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
X64OperandGenerator g(this);
// TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
- Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node),
+ Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
@@ -494,41 +578,60 @@ void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
X64OperandGenerator g(this);
- // TODO(turbofan): X64 SSE cvttsd2siq should support operands.
- Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node),
- g.UseDoubleRegister(node->InputAt(0)));
+ Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ // TODO(turbofan): X64 SSE conversions should take an operand.
+ Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Add(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
void InstructionSelector::VisitFloat64Div(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
@@ -536,24 +639,14 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
X64OperandGenerator g(this);
InstructionOperand* temps[] = {g.TempRegister(rax)};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
- g.UseDoubleRegister(node->InputAt(0)),
- g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
+ temps);
}
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X64OperandGenerator g(this);
- // TODO(dcarney): other modes
- Emit(kX64Int64ToInt32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
- X64OperandGenerator g(this);
- // TODO(dcarney): other modes
- Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -652,8 +745,7 @@ void InstructionSelector::VisitFloat64Compare(Node* node,
X64OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
- cont);
+ VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}
@@ -661,60 +753,52 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {
X64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor = GetFrameStateDescriptor(
+ call->InputAt(static_cast<int>(descriptor->InputCount())));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
+ InitializeCallBuffer(call, &buffer, true, true);
- // TODO(dcarney): stack alignment for c calls.
- // TODO(dcarney): shadow space on window for c calls.
// Push any stack arguments.
- for (int i = buffer.pushed_count - 1; i >= 0; --i) {
- Node* input = buffer.pushed_nodes[i];
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
// TODO(titzer): handle pushing double parameters.
- if (g.CanBeImmediate(input)) {
- Emit(kX64PushI, NULL, g.UseImmediate(input));
- } else {
- Emit(kX64Push, NULL, g.Use(input));
- }
+ Emit(kX64Push, NULL,
+ g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kX64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kX64CallAddress;
- break;
case CallDescriptor::kCallJSFunction:
- opcode = kX64CallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_count > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
- }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/x64/linkage-x64.cc b/deps/v8/src/compiler/x64/linkage-x64.cc
index 84c01e6546..8175bc6ecd 100644
--- a/deps/v8/src/compiler/x64/linkage-x64.cc
+++ b/deps/v8/src/compiler/x64/linkage-x64.cc
@@ -20,7 +20,7 @@ const bool kWin64 = true;
const bool kWin64 = false;
#endif
-struct LinkageHelperTraits {
+struct X64LinkageHelperTraits {
static Register ReturnValueReg() { return rax; }
static Register ReturnValue2Reg() { return rdx; }
static Register JSCallFunctionReg() { return rdi; }
@@ -47,37 +47,34 @@ struct LinkageHelperTraits {
static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
};
+typedef LinkageHelper<X64LinkageHelperTraits> LH;
CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
- return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
- zone, parameter_count);
+ return LH::GetJSCallDescriptor(zone, parameter_count);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Property properties,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
- zone, function, parameter_count, properties, can_deoptimize);
+ Operator::Properties properties, Zone* zone) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
- CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
- return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
- zone, descriptor, stack_parameter_count, can_deoptimize);
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone) {
+ return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+ flags);
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(
- Zone* zone, int num_params, MachineType return_type,
- const MachineType* param_types) {
- return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
- zone, num_params, return_type, param_types);
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 1b49526526..30c474d5ff 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -388,7 +388,7 @@ Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
Isolate* isolate = GetIsolate();
Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
if (!result->IsUndefined()) return result;
- return isolate->factory()->NewStringFromStaticAscii(
+ return isolate->factory()->NewStringFromStaticChars(
"Code generation from strings disallowed for this context");
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 63c9955b97..ac25e48853 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -201,7 +201,8 @@ enum BindingFlags {
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol) \
- V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol)
+ V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -396,6 +397,7 @@ class Context: public FixedArray {
SET_ITERATOR_MAP_INDEX,
ITERATOR_SYMBOL_INDEX,
UNSCOPABLES_SYMBOL_INDEX,
+ ARRAY_VALUES_ITERATOR_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@@ -551,14 +553,20 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static int FunctionMapIndex(StrictMode strict_mode, bool is_generator) {
- return is_generator
- ? (strict_mode == SLOPPY
- ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
- : STRICT_GENERATOR_FUNCTION_MAP_INDEX)
- : (strict_mode == SLOPPY
- ? SLOPPY_FUNCTION_MAP_INDEX
- : STRICT_FUNCTION_MAP_INDEX);
+ static int FunctionMapIndex(StrictMode strict_mode, FunctionKind kind) {
+ if (IsGeneratorFunction(kind)) {
+ return strict_mode == SLOPPY ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
+ : STRICT_GENERATOR_FUNCTION_MAP_INDEX;
+ }
+
+ if (IsArrowFunction(kind) || IsConciseMethod(kind)) {
+ return strict_mode == SLOPPY
+ ? SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX
+ : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+ }
+
+ return strict_mode == SLOPPY ? SLOPPY_FUNCTION_MAP_INDEX
+ : STRICT_FUNCTION_MAP_INDEX;
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index ce3054ba31..ae87dc4d31 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -14,6 +14,7 @@
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
+#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/conversions.h"
#include "src/double.h"
@@ -24,7 +25,7 @@ namespace v8 {
namespace internal {
inline double JunkStringValue() {
- return BitCast<double, uint64_t>(kQuietNaNMask);
+ return bit_cast<double, uint64_t>(kQuietNaNMask);
}
@@ -66,6 +67,14 @@ inline unsigned int FastD2UI(double x) {
}
+inline float DoubleToFloat32(double x) {
+ // TODO(yanggou): This static_cast is implementation-defined behaviour in C++,
+ // so we may need to do the conversion manually instead to match the spec.
+ volatile float f = static_cast<float>(x);
+ return f;
+}
+
+
inline double DoubleToInteger(double x) {
if (std::isnan(x)) return 0;
if (!std::isfinite(x) || x == 0) return x;
@@ -288,7 +297,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
- if (IsPowerOf2(radix)) {
+ if (base::bits::IsPowerOfTwo32(radix)) {
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 4b41d5e6a8..8b776235e7 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -211,7 +211,7 @@ char* DoubleToFixedCString(double value, int f) {
// use the non-fixed conversion routine.
if (abs_value >= kFirstNonFixed) {
char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ Vector<char> buffer(arr, arraysize(arr));
return StrDup(DoubleToCString(value, buffer));
}
@@ -490,7 +490,7 @@ double StringToDouble(UnicodeCache* unicode_cache,
DisallowHeapAllocation no_gc;
String::FlatContent flat = string->GetFlatContent();
// ECMA-262 section 15.1.2.3, empty string is NaN
- if (flat.IsAscii()) {
+ if (flat.IsOneByte()) {
return StringToDouble(
unicode_cache, flat.ToOneByteVector(), flags, empty_string_val);
} else {
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index c33de77cd1..6a28b5f1ea 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -77,6 +77,10 @@ inline double FastUI2D(unsigned x) {
}
+// This function should match the exact semantics of ECMA-262 20.2.2.17.
+inline float DoubleToFloat32(double x);
+
+
// This function should match the exact semantics of ECMA-262 9.4.
inline double DoubleToInteger(double x);
@@ -153,6 +157,12 @@ static inline bool IsMinusZero(double value) {
}
+static inline bool IsSmiDouble(double value) {
+ return !IsMinusZero(value) && value >= Smi::kMinValue &&
+ value <= Smi::kMaxValue && value == FastI2D(FastD2I(value));
+}
+
+
// Integer32 is an integer that can be represented as a signed 32-bit
// integer. It has to be in the range [-2^31, 2^31 - 1].
// We also have to check for negative 0 as it is not an Integer32.
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index f778f556be..651cf54be9 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -291,9 +291,11 @@ class HistogramTimerScope BASE_EMBEDDED {
#endif
};
-#define HISTOGRAM_RANGE_LIST(HR) \
- /* Generic range histograms */ \
- HR(gc_idle_time_allotted_in_ms, V8.GCIdleTimeAllottedInMS, 0, 10000, 101)
+#define HISTOGRAM_RANGE_LIST(HR) \
+ /* Generic range histograms */ \
+ HR(gc_idle_time_allotted_in_ms, V8.GCIdleTimeAllottedInMS, 0, 10000, 101) \
+ HR(gc_idle_time_limit_overshot, V8.GCIdleTimeLimit.Overshot, 0, 10000, 101) \
+ HR(gc_idle_time_limit_undershot, V8.GCIdleTimeLimit.Undershot, 0, 10000, 101)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
@@ -310,7 +312,10 @@ class HistogramTimerScope BASE_EMBEDDED {
/* Total compilation times. */ \
HT(compile, V8.Compile) \
HT(compile_eval, V8.CompileEval) \
- HT(compile_lazy, V8.CompileLazy)
+ /* Serialization as part of compilation (code caching) */ \
+ HT(compile_serialize, V8.CompileSerialize) \
+ HT(compile_deserialize, V8.CompileDeserialize)
+
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
/* Heap fragmentation. */ \
@@ -425,130 +430,128 @@ class HistogramTimerScope BASE_EMBEDDED {
SC(store_buffer_overflows, V8.StoreBufferOverflows)
-#define STATS_COUNTER_LIST_2(SC) \
- /* Number of code stubs. */ \
- SC(code_stubs, V8.CodeStubs) \
- /* Amount of stub code. */ \
- SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
- /* Amount of (JS) compiled code. */ \
- SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
- SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
- SC(gc_compactor_caused_by_promoted_data, \
- V8.GCCompactorCausedByPromotedData) \
- SC(gc_compactor_caused_by_oldspace_exhaustion, \
- V8.GCCompactorCausedByOldspaceExhaustion) \
- SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
- SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- /* How is the generic keyed-load stub used? */ \
- SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
- SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
- SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
- SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
- SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
- SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
- /* How is the generic keyed-call stub used? */ \
- SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
- SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
- SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
- SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
- SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
- SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
- SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
- SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
- SC(store_normal_miss, V8.StoreNormalMiss) \
- SC(store_normal_hit, V8.StoreNormalHit) \
- SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
- SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
- SC(cow_arrays_converted, V8.COWArraysConverted) \
- SC(call_miss, V8.CallMiss) \
- SC(keyed_call_miss, V8.KeyedCallMiss) \
- SC(load_miss, V8.LoadMiss) \
- SC(keyed_load_miss, V8.KeyedLoadMiss) \
- SC(call_const, V8.CallConst) \
- SC(call_const_fast_api, V8.CallConstFastApi) \
- SC(call_const_interceptor, V8.CallConstInterceptor) \
- SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
- SC(call_global_inline, V8.CallGlobalInline) \
- SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
- SC(constructed_objects, V8.ConstructedObjects) \
- SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
- SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
- SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
- SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
- SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
- SC(array_function_runtime, V8.ArrayFunctionRuntime) \
- SC(array_function_native, V8.ArrayFunctionNative) \
- SC(for_in, V8.ForIn) \
- SC(enum_cache_hits, V8.EnumCacheHits) \
- SC(enum_cache_misses, V8.EnumCacheMisses) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(fast_new_closure_total, V8.FastNewClosureTotal) \
- SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
- SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
- SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative) \
- SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
- SC(sub_string_runtime, V8.SubStringRuntime) \
- SC(sub_string_native, V8.SubStringNative) \
- SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
- SC(string_compare_native, V8.StringCompareNative) \
- SC(string_compare_runtime, V8.StringCompareRuntime) \
- SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
- SC(regexp_entry_native, V8.RegExpEntryNative) \
- SC(number_to_string_native, V8.NumberToStringNative) \
- SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_acos, V8.MathAcos) \
- SC(math_asin, V8.MathAsin) \
- SC(math_atan, V8.MathAtan) \
- SC(math_atan2, V8.MathAtan2) \
- SC(math_exp, V8.MathExp) \
- SC(math_floor, V8.MathFloor) \
- SC(math_log, V8.MathLog) \
- SC(math_pow, V8.MathPow) \
- SC(math_round, V8.MathRound) \
- SC(math_sqrt, V8.MathSqrt) \
- SC(stack_interrupts, V8.StackInterrupts) \
- SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
- SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
- SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
- SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
- SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
- /* Number of write barriers in generated code. */ \
- SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \
- SC(write_barriers_static, V8.WriteBarriersStatic) \
- SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
- SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
- SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
- SC(old_pointer_space_bytes_available, \
- V8.MemoryOldPointerSpaceBytesAvailable) \
- SC(old_pointer_space_bytes_committed, \
- V8.MemoryOldPointerSpaceBytesCommitted) \
- SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
- SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
- SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
- SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
- SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
- SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
- SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
- SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
- SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
- SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
- SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
- SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
- SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
- SC(property_cell_space_bytes_available, \
- V8.MemoryPropertyCellSpaceBytesAvailable) \
- SC(property_cell_space_bytes_committed, \
- V8.MemoryPropertyCellSpaceBytesCommitted) \
- SC(property_cell_space_bytes_used, \
- V8.MemoryPropertyCellSpaceBytesUsed) \
- SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
- SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
+#define STATS_COUNTER_LIST_2(SC) \
+ /* Number of code stubs. */ \
+ SC(code_stubs, V8.CodeStubs) \
+ /* Amount of stub code. */ \
+ SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
+ /* Amount of (JS) compiled code. */ \
+ SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
+ SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
+ SC(gc_compactor_caused_by_promoted_data, V8.GCCompactorCausedByPromotedData) \
+ SC(gc_compactor_caused_by_oldspace_exhaustion, \
+ V8.GCCompactorCausedByOldspaceExhaustion) \
+ SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
+ SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
+ /* How is the generic keyed-load stub used? */ \
+ SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
+ SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
+ SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
+ SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
+ SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
+ SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
+ /* How is the generic keyed-call stub used? */ \
+ SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
+ SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
+ SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
+ SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
+ SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
+ SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
+ SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
+ SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
+ SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
+ SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
+ SC(store_normal_miss, V8.StoreNormalMiss) \
+ SC(store_normal_hit, V8.StoreNormalHit) \
+ SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
+ SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
+ SC(cow_arrays_converted, V8.COWArraysConverted) \
+ SC(call_miss, V8.CallMiss) \
+ SC(keyed_call_miss, V8.KeyedCallMiss) \
+ SC(load_miss, V8.LoadMiss) \
+ SC(keyed_load_miss, V8.KeyedLoadMiss) \
+ SC(call_const, V8.CallConst) \
+ SC(call_const_fast_api, V8.CallConstFastApi) \
+ SC(call_const_interceptor, V8.CallConstInterceptor) \
+ SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
+ SC(call_global_inline, V8.CallGlobalInline) \
+ SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
+ SC(constructed_objects, V8.ConstructedObjects) \
+ SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
+ SC(negative_lookups, V8.NegativeLookups) \
+ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
+ SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
+ SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
+ SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
+ SC(array_function_runtime, V8.ArrayFunctionRuntime) \
+ SC(array_function_native, V8.ArrayFunctionNative) \
+ SC(for_in, V8.ForIn) \
+ SC(enum_cache_hits, V8.EnumCacheHits) \
+ SC(enum_cache_misses, V8.EnumCacheMisses) \
+ SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
+ SC(fast_new_closure_total, V8.FastNewClosureTotal) \
+ SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
+ SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
+ SC(string_add_runtime, V8.StringAddRuntime) \
+ SC(string_add_native, V8.StringAddNative) \
+ SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte) \
+ SC(sub_string_runtime, V8.SubStringRuntime) \
+ SC(sub_string_native, V8.SubStringNative) \
+ SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
+ SC(string_compare_native, V8.StringCompareNative) \
+ SC(string_compare_runtime, V8.StringCompareRuntime) \
+ SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
+ SC(regexp_entry_native, V8.RegExpEntryNative) \
+ SC(number_to_string_native, V8.NumberToStringNative) \
+ SC(number_to_string_runtime, V8.NumberToStringRuntime) \
+ SC(math_acos, V8.MathAcos) \
+ SC(math_asin, V8.MathAsin) \
+ SC(math_atan, V8.MathAtan) \
+ SC(math_atan2, V8.MathAtan2) \
+ SC(math_exp, V8.MathExp) \
+ SC(math_floor, V8.MathFloor) \
+ SC(math_log, V8.MathLog) \
+ SC(math_pow, V8.MathPow) \
+ SC(math_round, V8.MathRound) \
+ SC(math_sqrt, V8.MathSqrt) \
+ SC(stack_interrupts, V8.StackInterrupts) \
+ SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
+ SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
+ SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
+ SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
+ SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
+ SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
+ /* Number of write barriers in generated code. */ \
+ SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \
+ SC(write_barriers_static, V8.WriteBarriersStatic) \
+ SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
+ SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
+ SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
+ SC(old_pointer_space_bytes_available, \
+ V8.MemoryOldPointerSpaceBytesAvailable) \
+ SC(old_pointer_space_bytes_committed, \
+ V8.MemoryOldPointerSpaceBytesCommitted) \
+ SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
+ SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
+ SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
+ SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
+ SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
+ SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
+ SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
+ SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
+ SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
+ SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
+ SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
+ SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
+ SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
+ SC(property_cell_space_bytes_available, \
+ V8.MemoryPropertyCellSpaceBytesAvailable) \
+ SC(property_cell_space_bytes_committed, \
+ V8.MemoryPropertyCellSpaceBytesCommitted) \
+ SC(property_cell_space_bytes_used, V8.MemoryPropertyCellSpaceBytesUsed) \
+ SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
+ SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 59c50b432f..9a20b06643 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -7,7 +7,6 @@
#include <signal.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/select.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -16,6 +15,9 @@
#include "src/d8.h"
+#if !V8_OS_NACL
+#include <sys/select.h>
+#endif
namespace v8 {
@@ -102,11 +104,16 @@ static bool WaitOnFD(int fd,
}
timeout.tv_usec = (read_timeout % 1000) * 1000;
timeout.tv_sec = read_timeout / 1000;
+#if V8_OS_NACL
+ // PNaCL has no support for select.
+ int number_of_fds_ready = -1;
+#else
int number_of_fds_ready = select(fd + 1,
&readfds,
&writefds,
&exceptfds,
read_timeout != -1 ? &timeout : NULL);
+#endif
return number_of_fds_ready == 1;
}
@@ -547,8 +554,12 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
if (args[0]->IsNumber()) {
- mode_t mask = args[0]->Int32Value();
- int previous = umask(mask);
+#if V8_OS_NACL
+ // PNaCL has no support for umask.
+ int previous = 0;
+#else
+ int previous = umask(args[0]->Int32Value());
+#endif
args.GetReturnValue().Set(previous);
return;
} else {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 356a64b2dc..d1929b071f 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -45,6 +45,8 @@
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/base/sys-info.h"
+#include "src/basic-block-profiler.h"
#include "src/d8-debug.h"
#include "src/debug.h"
#include "src/natives.h"
@@ -53,7 +55,12 @@
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
-#endif
+#else
+#include <windows.h> // NOLINT
+#if defined(_MSC_VER)
+#include <crtdbg.h> // NOLINT
+#endif // defined(_MSC_VER)
+#endif // !defined(_WIN32) && !defined(_WIN64)
#ifndef DCHECK
#define DCHECK(condition) assert(condition)
@@ -332,7 +339,6 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* v8_isolate = args.GetIsolate();
i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
args.GetReturnValue().Set(heap->synthetic_time());
-
} else {
base::TimeDelta delta =
base::TimeTicks::HighResolutionNow() - kInitialTicks;
@@ -1529,13 +1535,13 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
- virtual void* Allocate(size_t) V8_OVERRIDE {
+ virtual void* Allocate(size_t) OVERRIDE {
return malloc(0);
}
- virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
+ virtual void* AllocateUninitialized(size_t length) OVERRIDE {
return malloc(0);
}
- virtual void Free(void* p, size_t) V8_OVERRIDE {
+ virtual void Free(void* p, size_t) OVERRIDE {
free(p);
}
};
@@ -1595,10 +1601,26 @@ class StartupDataHandler {
int Shell::Main(int argc, char* argv[]) {
+#if (defined(_WIN32) || defined(_WIN64))
+ UINT new_flags =
+ SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
+ UINT existing_flags = SetErrorMode(new_flags);
+ SetErrorMode(existing_flags | new_flags);
+#if defined(_MSC_VER)
+ _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+ _set_error_mode(_OUT_TO_STDERR);
+#endif // defined(_MSC_VER)
+#endif // defined(_WIN32) || defined(_WIN64)
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU(options.icu_data_file);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
StartupDataHandler startup_data(options.natives_blob, options.snapshot_blob);
#endif
@@ -1612,27 +1634,26 @@ int Shell::Main(int argc, char* argv[]) {
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
}
int result = 0;
- Isolate* isolate = Isolate::New();
+ Isolate::CreateParams create_params;
+#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
+ if (i::FLAG_gdbjit) {
+ create_params.code_event_handler = i::GDBJITInterface::EventHandler;
+ }
+#endif
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+ vTune::InitializeVtuneForV8(create_params);
+#endif
#ifndef V8_SHARED
- v8::ResourceConstraints constraints;
- constraints.ConfigureDefaults(base::OS::TotalPhysicalMemory(),
- base::OS::MaxVirtualMemory(),
- base::OS::NumberOfProcessorsOnline());
- v8::SetResourceConstraints(isolate, &constraints);
+ create_params.constraints.ConfigureDefaults(
+ base::SysInfo::AmountOfPhysicalMemory(),
+ base::SysInfo::AmountOfVirtualMemory(),
+ base::SysInfo::NumberOfProcessors());
#endif
+ Isolate* isolate = Isolate::New(create_params);
DumbLineEditor dumb_line_editor(isolate);
{
Isolate::Scope scope(isolate);
Initialize(isolate);
-#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
- if (i::FLAG_gdbjit) {
- v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
- i::GDBJITInterface::EventHandler);
- }
-#endif
-#ifdef ENABLE_VTUNE_JIT_INTERFACE
- vTune::InitializeVtuneForV8();
-#endif
PerIsolateData data(isolate);
InitializeDebugger(isolate);
@@ -1680,6 +1701,14 @@ int Shell::Main(int argc, char* argv[]) {
RunShell(isolate);
}
}
+#ifndef V8_SHARED
+ // Dump basic block profiling data.
+ if (i::BasicBlockProfiler* profiler =
+ reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
+ i::OFStream os(stdout);
+ os << *profiler;
+ }
+#endif // !V8_SHARED
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 991e5a51b0..44ee09a3b1 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -12,6 +12,7 @@
#include "src/v8.h"
#else
#include "include/v8.h"
+#include "src/base/compiler-specific.h"
#endif // !V8_SHARED
namespace v8 {
@@ -171,7 +172,7 @@ class SourceGroup {
};
-class BinaryResource : public v8::String::ExternalAsciiStringResource {
+class BinaryResource : public v8::String::ExternalOneByteStringResource {
public:
BinaryResource(const char* string, int length)
: data_(string),
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index e591778fa1..bd92ea0531 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/data-flow.h"
+
+#include "src/base/bits.h"
#include "src/scopes.h"
namespace v8 {
@@ -40,4 +40,15 @@ void BitVector::Iterator::Advance() {
current_value_ = val >> 1;
}
-} } // namespace v8::internal
+
+int BitVector::Count() const {
+ int count = 0;
+ for (int i = 0; i < data_length_; i++) {
+ int data = data_[i];
+ if (data != 0) count += base::bits::CountPopulation32(data);
+ }
+ return count;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 042e29f854..bfd238d24e 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -137,6 +137,17 @@ class BitVector: public ZoneObject {
}
}
+ bool IntersectIsChanged(const BitVector& other) {
+ DCHECK(other.length() == length());
+ bool changed = false;
+ for (int i = 0; i < data_length_; i++) {
+ uint32_t old_data = data_[i];
+ data_[i] &= other.data_[i];
+ if (data_[i] != old_data) changed = true;
+ }
+ return changed;
+ }
+
void Subtract(const BitVector& other) {
DCHECK(other.length() == length());
for (int i = 0; i < data_length_; i++) {
@@ -164,14 +175,7 @@ class BitVector: public ZoneObject {
return true;
}
- int Count() const {
- int count = 0;
- for (int i = 0; i < data_length_; i++) {
- int data = data_[i];
- if (data != 0) count += CompilerIntrinsics::CountSetBits(data);
- }
- return count;
- }
+ int Count() const;
int length() const { return length_; }
@@ -185,6 +189,7 @@ class BitVector: public ZoneObject {
uint32_t* data_;
};
+
class GrowableBitVector BASE_EMBEDDED {
public:
class Iterator BASE_EMBEDDED {
@@ -241,8 +246,7 @@ class GrowableBitVector BASE_EMBEDDED {
BitVector* bits_;
};
-
-} } // namespace v8::internal
-
+} // namespace internal
+} // namespace v8
#endif // V8_DATAFLOW_H_
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index 633dd9f38e..2e5ce39a04 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -103,14 +103,22 @@ class DateCache {
}
// ECMA 262 - 15.9.1.9
+ // LocalTime(t) = t + LocalTZA + DaylightSavingTA(t)
+ // ECMA 262 assumes that DaylightSavingTA is computed using UTC time,
+ // but we fetch DST from OS using local time, therefore we need:
+ // LocalTime(t) = t + LocalTZA + DaylightSavingTA(t + LocalTZA).
int64_t ToLocal(int64_t time_ms) {
- return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
+ time_ms += LocalOffsetInMs();
+ return time_ms + DaylightSavingsOffsetInMs(time_ms);
}
// ECMA 262 - 15.9.1.9
+ // UTC(t) = t - LocalTZA - DaylightSavingTA(t - LocalTZA)
+ // ECMA 262 assumes that DaylightSavingTA is computed using UTC time,
+ // but we fetch DST from OS using local time, therefore we need:
+ // UTC(t) = t - LocalTZA - DaylightSavingTA(t).
int64_t ToUTC(int64_t time_ms) {
- time_ms -= LocalOffsetInMs();
- return time_ms - DaylightSavingsOffsetInMs(time_ms);
+ return time_ms - LocalOffsetInMs() - DaylightSavingsOffsetInMs(time_ms);
}
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index a4c8801ea6..a1468a02b8 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -21,7 +21,8 @@ Debug.DebugEvent = { Break: 1,
AfterCompile: 5,
CompileError: 6,
PromiseEvent: 7,
- AsyncTaskEvent: 8 };
+ AsyncTaskEvent: 8,
+ BreakForCommand: 9 };
// Types of exceptions that can be broken upon.
Debug.ExceptionBreak = { Caught : 0,
@@ -1172,10 +1173,13 @@ CompileEvent.prototype.toJSONProtocol = function() {
switch (this.type_) {
case Debug.DebugEvent.BeforeCompile:
o.event = "beforeCompile";
+ break;
case Debug.DebugEvent.AfterCompile:
o.event = "afterCompile";
+ break;
case Debug.DebugEvent.CompileError:
o.event = "compileError";
+ break;
}
o.body = {};
o.body.script = this.script_;
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 2ae8630885..f0e77968eb 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -16,14 +16,11 @@
#include "src/execution.h"
#include "src/full-codegen.h"
#include "src/global-handles.h"
-#include "src/ic.h"
-#include "src/ic-inl.h"
#include "src/isolate-inl.h"
#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/natives.h"
-#include "src/stub-cache.h"
#include "include/v8-debug.h"
@@ -568,7 +565,6 @@ void Debug::ThreadInit() {
// TODO(isolates): frames_are_dropped_?
thread_local_.current_debug_scope_ = NULL;
thread_local_.restarter_frame_function_pointer_ = NULL;
- thread_local_.promise_on_stack_ = NULL;
}
@@ -761,13 +757,9 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
- Handle<Object> exception;
- MaybeHandle<Object> result =
- Execution::TryCall(function,
- handle(context->global_proxy()),
- 0,
- NULL,
- &exception);
+ MaybeHandle<Object> maybe_exception;
+ MaybeHandle<Object> result = Execution::TryCall(
+ function, handle(context->global_proxy()), 0, NULL, &maybe_exception);
// Check for caught exceptions.
if (result.is_null()) {
@@ -778,7 +770,8 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate, "error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<JSArray>());
DCHECK(!isolate->has_pending_exception());
- if (!exception.is_null()) {
+ Handle<Object> exception;
+ if (maybe_exception.ToHandle(&exception)) {
isolate->set_pending_exception(*exception);
MessageHandler::ReportMessage(isolate, NULL, message);
isolate->clear_pending_exception();
@@ -825,7 +818,7 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("builtins"));
+ STATIC_CHAR_VECTOR("builtins"));
Handle<GlobalObject> global =
Handle<GlobalObject>(context->global_object(), isolate_);
Handle<JSBuiltinsObject> builtin =
@@ -855,9 +848,6 @@ void Debug::Unload() {
ClearAllBreakPoints();
ClearStepping();
- // Match unmatched PopPromise calls.
- while (thread_local_.promise_on_stack_) PopPromise();
-
// Return debugger is not loaded.
if (!is_loaded()) return;
@@ -1044,7 +1034,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
// Get the function IsBreakPointTriggered (defined in debug-debugger.js).
Handle<String> is_break_point_triggered_string =
factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
+ STATIC_CHAR_VECTOR("IsBreakPointTriggered"));
Handle<GlobalObject> debug_global(debug_context()->global_object());
Handle<JSFunction> check_break_point =
Handle<JSFunction>::cast(Object::GetProperty(
@@ -1058,7 +1048,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<Object> result;
if (!Execution::TryCall(check_break_point,
isolate_->js_builtins_object(),
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv).ToHandle(&result)) {
return false;
}
@@ -1272,61 +1262,10 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
-PromiseOnStack::PromiseOnStack(Isolate* isolate, PromiseOnStack* prev,
- Handle<JSObject> promise)
- : isolate_(isolate), prev_(prev) {
- handler_ = StackHandler::FromAddress(
- Isolate::handler(isolate->thread_local_top()));
- promise_ =
- Handle<JSObject>::cast(isolate->global_handles()->Create(*promise));
-}
-
-
-PromiseOnStack::~PromiseOnStack() {
- isolate_->global_handles()->Destroy(
- Handle<Object>::cast(promise_).location());
-}
-
-
-void Debug::PushPromise(Handle<JSObject> promise) {
- PromiseOnStack* prev = thread_local_.promise_on_stack_;
- thread_local_.promise_on_stack_ = new PromiseOnStack(isolate_, prev, promise);
-}
-
-
-void Debug::PopPromise() {
- if (thread_local_.promise_on_stack_ == NULL) return;
- PromiseOnStack* prev = thread_local_.promise_on_stack_->prev();
- delete thread_local_.promise_on_stack_;
- thread_local_.promise_on_stack_ = prev;
-}
-
-
-Handle<Object> Debug::GetPromiseOnStackOnThrow() {
- Handle<Object> undefined = isolate_->factory()->undefined_value();
- if (thread_local_.promise_on_stack_ == NULL) return undefined;
- StackHandler* promise_try = thread_local_.promise_on_stack_->handler();
- // Find the top-most try-catch handler.
- StackHandler* handler = StackHandler::FromAddress(
- Isolate::handler(isolate_->thread_local_top()));
- do {
- if (handler == promise_try) {
- // Mark the pushed try-catch handler to prevent a later duplicate event
- // triggered with the following reject.
- return thread_local_.promise_on_stack_->promise();
- }
- handler = handler->next();
- // Throwing inside a Promise can be intercepted by an inner try-catch, so
- // we stop at the first try-catch handler.
- } while (handler != NULL && !handler->is_catch());
- return undefined;
-}
-
-
bool Debug::PromiseHasRejectHandler(Handle<JSObject> promise) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(
JSObject::GetDataProperty(isolate_->js_builtins_object(),
- isolate_->factory()->NewStringFromStaticAscii(
+ isolate_->factory()->NewStringFromStaticChars(
"PromiseHasRejectHandler")));
Handle<Object> result =
Execution::Call(isolate_, fun, promise, 0, NULL).ToHandleChecked();
@@ -1941,7 +1880,7 @@ static void EnsureFunctionHasDebugBreakSlots(Handle<JSFunction> function) {
// Make sure that the shared full code is compiled with debug
// break slots.
if (!function->shared()->code()->has_debug_break_slots()) {
- MaybeHandle<Code> code = Compiler::GetCodeForDebugging(function);
+ MaybeHandle<Code> code = Compiler::GetDebugCode(function);
// Recompilation can fail. In that case leave the code as it was.
if (!code.is_null()) function->ReplaceCode(*code.ToHandleChecked());
} else {
@@ -1975,7 +1914,7 @@ void Debug::PrepareForBreakPoints() {
Deoptimizer::DeoptimizeAll(isolate_);
- Handle<Code> lazy_compile = isolate_->builtins()->CompileUnoptimized();
+ Handle<Code> lazy_compile = isolate_->builtins()->CompileLazy();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2454,13 +2393,11 @@ void Debug::ClearMirrorCache() {
Factory* factory = isolate_->factory();
Handle<GlobalObject> global(isolate_->global_object());
JSObject::SetProperty(global,
- factory->NewStringFromAsciiChecked("next_handle_"),
- handle(Smi::FromInt(0), isolate_),
- SLOPPY).Check();
+ factory->NewStringFromAsciiChecked("next_handle_"),
+ handle(Smi::FromInt(0), isolate_), SLOPPY).Check();
JSObject::SetProperty(global,
- factory->NewStringFromAsciiChecked("mirror_cache_"),
- factory->NewJSArray(0, FAST_ELEMENTS),
- SLOPPY).Check();
+ factory->NewStringFromAsciiChecked("mirror_cache_"),
+ factory->NewJSArray(0, FAST_ELEMENTS), SLOPPY).Check();
}
@@ -2516,7 +2453,7 @@ MaybeHandle<Object> Debug::MakeJSObject(const char* constructor_name,
MaybeHandle<Object> Debug::MakeExecutionState() {
// Create the execution state object.
Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
- return MakeJSObject("MakeExecutionState", ARRAY_SIZE(argv), argv);
+ return MakeJSObject("MakeExecutionState", arraysize(argv), argv);
}
@@ -2524,7 +2461,7 @@ MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) {
// Create the new break event object.
Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
break_points_hit };
- return MakeJSObject("MakeBreakEvent", ARRAY_SIZE(argv), argv);
+ return MakeJSObject("MakeBreakEvent", arraysize(argv), argv);
}
@@ -2536,7 +2473,7 @@ MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
exception,
isolate_->factory()->ToBoolean(uncaught),
promise };
- return MakeJSObject("MakeExceptionEvent", ARRAY_SIZE(argv), argv);
+ return MakeJSObject("MakeExceptionEvent", arraysize(argv), argv);
}
@@ -2546,28 +2483,38 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
Handle<Object> script_wrapper = Script::GetWrapper(script);
Handle<Object> argv[] = { script_wrapper,
isolate_->factory()->NewNumberFromInt(type) };
- return MakeJSObject("MakeCompileEvent", ARRAY_SIZE(argv), argv);
+ return MakeJSObject("MakeCompileEvent", arraysize(argv), argv);
}
MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) {
// Create the promise event object.
Handle<Object> argv[] = { event_data };
- return MakeJSObject("MakePromiseEvent", ARRAY_SIZE(argv), argv);
+ return MakeJSObject("MakePromiseEvent", arraysize(argv), argv);
}
MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
// Create the async task event object.
Handle<Object> argv[] = { task_event };
- return MakeJSObject("MakeAsyncTaskEvent", ARRAY_SIZE(argv), argv);
+ return MakeJSObject("MakeAsyncTaskEvent", arraysize(argv), argv);
}
void Debug::OnThrow(Handle<Object> exception, bool uncaught) {
if (in_debug_scope() || ignore_events()) return;
+ // Temporarily clear any scheduled_exception to allow evaluating
+ // JavaScript from the debug event handler.
HandleScope scope(isolate_);
- OnException(exception, uncaught, GetPromiseOnStackOnThrow());
+ Handle<Object> scheduled_exception;
+ if (isolate_->has_scheduled_exception()) {
+ scheduled_exception = handle(isolate_->scheduled_exception(), isolate_);
+ isolate_->clear_scheduled_exception();
+ }
+ OnException(exception, uncaught, isolate_->GetPromiseOnStackOnThrow());
+ if (!scheduled_exception.is_null()) {
+ isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
+ }
}
@@ -2688,7 +2635,7 @@ void Debug::OnAfterCompile(Handle<Script> script) {
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
Handle<String> update_script_break_points_string =
isolate_->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
+ STATIC_CHAR_VECTOR("UpdateScriptBreakPoints"));
Handle<GlobalObject> debug_global(debug_context()->global_object());
Handle<Object> update_script_break_points =
Object::GetProperty(
@@ -2706,7 +2653,7 @@ void Debug::OnAfterCompile(Handle<Script> script) {
Handle<Object> argv[] = { wrapper };
if (Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
isolate_->js_builtins_object(),
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv).is_null()) {
return;
}
@@ -2823,7 +2770,7 @@ void Debug::CallEventCallback(v8::DebugEvent event,
event_listener_data_ };
Handle<JSReceiver> global(isolate_->global_proxy());
Execution::TryCall(Handle<JSFunction>::cast(event_listener_),
- global, ARRAY_SIZE(argv), argv);
+ global, arraysize(argv), argv);
}
}
@@ -2923,11 +2870,12 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<String> request_text = isolate_->factory()->NewStringFromTwoByte(
command_text).ToHandleChecked();
Handle<Object> request_args[] = { request_text };
- Handle<Object> exception;
Handle<Object> answer_value;
Handle<String> answer;
- MaybeHandle<Object> maybe_result = Execution::TryCall(
- process_debug_request, cmd_processor, 1, request_args, &exception);
+ MaybeHandle<Object> maybe_exception;
+ MaybeHandle<Object> maybe_result =
+ Execution::TryCall(process_debug_request, cmd_processor, 1,
+ request_args, &maybe_exception);
if (maybe_result.ToHandle(&answer_value)) {
if (answer_value->IsUndefined()) {
@@ -2945,10 +2893,15 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<Object> is_running_args[] = { answer };
maybe_result = Execution::Call(
isolate_, is_running, cmd_processor, 1, is_running_args);
- running = maybe_result.ToHandleChecked()->IsTrue();
+ Handle<Object> result;
+ if (!maybe_result.ToHandle(&result)) break;
+ running = result->IsTrue();
} else {
- answer = Handle<String>::cast(
- Execution::ToString(isolate_, exception).ToHandleChecked());
+ Handle<Object> exception;
+ if (!maybe_exception.ToHandle(&exception)) break;
+ Handle<Object> result;
+ if (!Execution::ToString(isolate_, exception).ToHandle(&result)) break;
+ answer = Handle<String>::cast(result);
}
// Return the result.
@@ -2961,6 +2914,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
// running state (through a continue command) or auto continue is active
// and there are no more commands queued.
} while (!running || has_commands());
+ command_queue_.Clear();
}
@@ -3062,7 +3016,7 @@ MaybeHandle<Object> Debug::Call(Handle<JSFunction> fun, Handle<Object> data) {
isolate_,
fun,
Handle<Object>(debug_context()->global_proxy(), isolate_),
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv);
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index e60e1aaab8..a5119d0773 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -333,22 +333,6 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
};
-class PromiseOnStack {
- public:
- PromiseOnStack(Isolate* isolate, PromiseOnStack* prev,
- Handle<JSObject> getter);
- ~PromiseOnStack();
- StackHandler* handler() { return handler_; }
- Handle<JSObject> promise() { return promise_; }
- PromiseOnStack* prev() { return prev_; }
- private:
- Isolate* isolate_;
- StackHandler* handler_;
- Handle<JSObject> promise_;
- PromiseOnStack* prev_;
-};
-
-
// This class contains the debugger support. The main purpose is to handle
// setting break points in the code.
//
@@ -452,11 +436,6 @@ class Debug {
// Check whether this frame is just about to return.
bool IsBreakAtReturn(JavaScriptFrame* frame);
- // Promise handling.
- // Push and pop a promise and the current try-catch handler.
- void PushPromise(Handle<JSObject> promise);
- void PopPromise();
-
// Support for LiveEdit
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
LiveEdit::FrameDropMode mode,
@@ -551,7 +530,6 @@ class Debug {
void ClearMirrorCache();
// Returns a promise if the pushed try-catch handler matches the current one.
- Handle<Object> GetPromiseOnStackOnThrow();
bool PromiseHasRejectHandler(Handle<JSObject> promise);
void CallEventCallback(v8::DebugEvent event,
@@ -658,13 +636,6 @@ class Debug {
// of the pointer to function being restarted. Otherwise (most of the time)
// stores NULL. This pointer is used with 'step in' implementation.
Object** restarter_frame_function_pointer_;
-
- // When a promise is being resolved, we may want to trigger a debug event
- // if we catch a throw. For this purpose we remember the try-catch
- // handler address that would catch the exception. We also hold onto a
- // closure that returns a promise if the exception is considered uncaught.
- // Due to the possibility of reentry we use a linked list.
- PromiseOnStack* promise_on_stack_;
};
// Storage location for registers when handling debug break calls
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 1df7df84d0..dd274ede99 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -353,7 +353,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
- bool turbofanned = code->is_turbofanned();
+ bool turbofanned = code->is_turbofanned() && !FLAG_turbo_deoptimization;
bool safe_to_deopt =
deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
@@ -378,7 +378,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
Object* next = code->next_code_link();
- if (code->marked_for_deoptimization()) {
+ if (code->marked_for_deoptimization() &&
+ (!code->is_turbofanned() || FLAG_turbo_deoptimization)) {
// Put the code into the list for later patching.
codes.Add(code, &zone);
@@ -400,10 +401,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
- if (FLAG_turbo_deoptimization) {
- PatchStackForMarkedCode(isolate);
- }
-
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
@@ -425,11 +422,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
// Do platform-specific patching to force any activations to lazy deopt.
- //
- // We skip patching Turbofan code - we patch return addresses on stack.
- // TODO(jarin) We should still zap the code object (but we have to
- // be careful not to zap the deoptimization block).
- if (!codes[i]->is_turbofanned()) {
+ if (!codes[i]->is_turbofanned() || FLAG_turbo_deoptimization) {
PatchCodeForDeoptimization(isolate, codes[i]);
// We might be in the middle of incremental marking with compaction.
@@ -441,56 +434,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
}
-static int FindPatchAddressForReturnAddress(Code* code, int pc) {
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int patch_count = input_data->ReturnAddressPatchCount();
- for (int i = 0; i < patch_count; i++) {
- int return_pc = input_data->ReturnAddressPc(i)->value();
- int patch_pc = input_data->PatchedAddressPc(i)->value();
- // If the supplied pc matches the return pc or if the address
- // has been already patched, return the patch pc.
- if (pc == return_pc || pc == patch_pc) {
- return patch_pc;
- }
- }
- return -1;
-}
-
-
-// For all marked Turbofanned code on stack, change the return address to go
-// to the deoptimization block.
-void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
- // TODO(jarin) We should tolerate missing patch entry for the topmost frame.
- for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
- it.Advance()) {
- StackFrame::Type type = it.frame()->type();
- if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (code->is_turbofanned() && code->marked_for_deoptimization()) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
- Address* pc_address = it.frame()->pc_address();
- int pc_offset =
- static_cast<int>(*pc_address - code->instruction_start());
- int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset);
-
- if (FLAG_trace_deopt) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[patching stack address for function: ");
- function->PrintName(scope.file());
- PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset,
- new_pc_offset);
- }
-
- CHECK_LE(0, new_pc_offset);
- *pc_address += new_pc_offset - pc_offset;
- }
- }
- }
-}
-
-
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
@@ -941,7 +884,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
CHECK_EQ(Translation::kSelfLiteralId, closure_id);
function = function_;
}
- unsigned height = iterator->Next();
+ unsigned height = iterator->Next() - 1; // Do not count the context.
unsigned height_in_bytes = height * kPointerSize;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating ");
@@ -1076,12 +1019,24 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
Register context_reg = JavaScriptFrame::context_register();
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
+ // Read the context from the translations.
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ value = output_frame->GetFrameSlot(output_offset);
+ // The context should not be a placeholder for a materialized object.
+ CHECK(value !=
+ reinterpret_cast<intptr_t>(isolate_->heap()->arguments_marker()));
+ if (value ==
+ reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value())) {
+ // If the context was optimized away, just use the context from
+ // the activation. This should only apply to Crankshaft code.
+ CHECK(!compiled_code_->is_turbofanned());
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
+ output_frame->SetFrameSlot(output_offset, value);
}
- output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
if (trace_scope_ != NULL) {
@@ -1625,17 +1580,13 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
CHECK(compiled_code_->is_hydrogen_stub());
int major_key = CodeStub::GetMajorKey(compiled_code_);
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
- // Check that there is a matching descriptor to the major key.
- // This will fail if there has not been one installed to the isolate.
- DCHECK_EQ(descriptor->MajorKey(), major_key);
+ CodeStubDescriptor descriptor(isolate_, compiled_code_->stub_key());
// The output frame must have room for all pushed register parameters
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
- int param_count = descriptor->GetEnvironmentParameterCount();
+ int param_count = descriptor.GetEnvironmentParameterCount();
CHECK_GE(param_count, 0);
int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
@@ -1733,7 +1684,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
intptr_t caller_arg_count = 0;
- bool arg_count_known = !descriptor->stack_parameter_count().is_valid();
+ bool arg_count_known = !descriptor.stack_parameter_count().is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
@@ -1785,8 +1736,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame_offset -= kPointerSize;
DoTranslateCommand(iterator, 0, output_frame_offset);
- if (!arg_count_known &&
- descriptor->IsEnvironmentParameterCountRegister(i)) {
+ if (!arg_count_known && descriptor.IsEnvironmentParameterCountRegister(i)) {
arguments_length_offset = output_frame_offset;
}
}
@@ -1825,11 +1775,11 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
CopyDoubleRegisters(output_frame);
// Fill registers containing handler and number of parameters.
- SetPlatformCompiledStubRegisters(output_frame, descriptor);
+ SetPlatformCompiledStubRegisters(output_frame, &descriptor);
// Compute this frame's PC, state, and continuation.
Code* trampoline = NULL;
- StubFunctionMode function_mode = descriptor->function_mode();
+ StubFunctionMode function_mode = descriptor.function_mode();
StubFailureTrampolineStub(isolate_,
function_mode).FindCodeInCache(&trampoline);
DCHECK(trampoline != NULL);
@@ -3645,6 +3595,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
bool has_construct_stub) {
FrameDescription* output_frame = deoptimizer->output_[frame_index];
function_ = output_frame->GetFunction();
+ context_ = reinterpret_cast<Object*>(output_frame->GetContext());
has_construct_stub_ = has_construct_stub;
expression_count_ = output_frame->GetExpressionCount();
expression_stack_ = new Object*[expression_count_];
@@ -3677,7 +3628,8 @@ DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&function_));
+ v->VisitPointer(bit_cast<Object**>(&function_));
+ v->VisitPointer(&context_);
v->VisitPointers(parameters_, parameters_ + parameters_count_);
v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index a0cc6975c3..612d5f6ecf 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -17,19 +17,9 @@ namespace internal {
static inline double read_double_value(Address p) {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return Memory::double_at(p);
-#else // V8_HOST_CAN_READ_UNALIGNED
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned address.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = *reinterpret_cast<uint32_t*>(p);
- c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
- return c.d;
-#endif // V8_HOST_CAN_READ_UNALIGNED
+ double d;
+ memcpy(&d, p, sizeof(d));
+ return d;
}
@@ -111,16 +101,41 @@ class Deoptimizer : public Malloced {
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
+ struct Reason {
+ Reason(int r, const char* m, const char* d)
+ : raw_position(r), mnemonic(m), detail(d) {}
+
+ bool operator==(const Reason& other) const {
+ return raw_position == other.raw_position &&
+ CStringEquals(mnemonic, other.mnemonic) &&
+ CStringEquals(detail, other.detail);
+ }
+
+ bool operator!=(const Reason& other) const { return !(*this == other); }
+
+ int raw_position;
+ const char* mnemonic;
+ const char* detail;
+ };
+
struct JumpTableEntry : public ZoneObject {
- inline JumpTableEntry(Address entry,
- Deoptimizer::BailoutType type,
- bool frame)
+ inline JumpTableEntry(Address entry, const Reason& the_reason,
+ Deoptimizer::BailoutType type, bool frame)
: label(),
address(entry),
+ reason(the_reason),
bailout_type(type),
- needs_frame(frame) { }
+ needs_frame(frame) {}
+
+ bool IsEquivalentTo(const JumpTableEntry& other) const {
+ return address == other.address && bailout_type == other.bailout_type &&
+ needs_frame == other.needs_frame &&
+ (!FLAG_trace_deopt || reason == other.reason);
+ }
+
Label label;
Address address;
+ Reason reason;
Deoptimizer::BailoutType bailout_type;
bool needs_frame;
};
@@ -177,8 +192,6 @@ class Deoptimizer : public Malloced {
// refer to that code.
static void DeoptimizeMarkedCode(Isolate* isolate);
- static void PatchStackForMarkedCode(Isolate* isolate);
-
// Visit all the known optimized functions in a given isolate.
static void VisitAllOptimizedFunctions(
Isolate* isolate, OptimizedFunctionVisitor* visitor);
@@ -379,7 +392,7 @@ class Deoptimizer : public Malloced {
// Fill the given output frame's registers to contain the failure handler
// address and the number of parameters for a stub failure trampoline.
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
- CodeStubInterfaceDescriptor* desc);
+ CodeStubDescriptor* desc);
// Fill the given output frame's double registers with the original values
// from the input frame's double registers.
@@ -494,7 +507,7 @@ class FrameDescription {
// This convoluted DCHECK is needed to work around a gcc problem that
// improperly detects an array bounds overflow in optimized debug builds
// when using a plain DCHECK.
- if (n >= ARRAY_SIZE(registers_)) {
+ if (n >= arraysize(registers_)) {
DCHECK(false);
return 0;
}
@@ -503,17 +516,17 @@ class FrameDescription {
}
double GetDoubleRegister(unsigned n) const {
- DCHECK(n < ARRAY_SIZE(double_registers_));
+ DCHECK(n < arraysize(double_registers_));
return double_registers_[n];
}
void SetRegister(unsigned n, intptr_t value) {
- DCHECK(n < ARRAY_SIZE(registers_));
+ DCHECK(n < arraysize(registers_));
registers_[n] = value;
}
void SetDoubleRegister(unsigned n, double value) {
- DCHECK(n < ARRAY_SIZE(double_registers_));
+ DCHECK(n < arraysize(double_registers_));
double_registers_[n] = value;
}
@@ -922,6 +935,9 @@ class DeoptimizedFrameInfo : public Malloced {
return function_;
}
+ // Get the frame context.
+ Object* GetContext() { return context_; }
+
// Check if this frame is preceded by construct stub frame. The bottom-most
// inlined frame might still be called by an uninlined construct stub.
bool HasConstructStub() {
@@ -958,6 +974,7 @@ class DeoptimizedFrameInfo : public Malloced {
}
JSFunction* function_;
+ Object* context_;
bool has_construct_stub_;
int parameters_count_;
int expression_count_;
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 942b2be452..78a0d1c6ff 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -19,21 +19,6 @@ namespace internal {
#ifdef ENABLE_DISASSEMBLER
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
- for (byte* pc = begin; pc < end; pc++) {
- if (f == NULL) {
- PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<intptr_t>(pc),
- pc - begin,
- *pc);
- } else {
- PrintF(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
- }
- }
-}
-
-
class V8NameConverter: public disasm::NameConverter {
public:
explicit V8NameConverter(Code* code) : code_(code) {}
@@ -74,12 +59,8 @@ const char* V8NameConverter::NameInCode(byte* addr) const {
}
-static void DumpBuffer(FILE* f, StringBuilder* out) {
- if (f == NULL) {
- PrintF("%s\n", out->Finalize());
- } else {
- PrintF(f, "%s\n", out->Finalize());
- }
+static void DumpBuffer(OStream* os, StringBuilder* out) {
+ (*os) << out->Finalize() << endl;
out->Reset();
}
@@ -87,11 +68,8 @@ static void DumpBuffer(FILE* f, StringBuilder* out) {
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
-static int DecodeIt(Isolate* isolate,
- FILE* f,
- const V8NameConverter& converter,
- byte* begin,
- byte* end) {
+static int DecodeIt(Isolate* isolate, OStream* os,
+ const V8NameConverter& converter, byte* begin, byte* end) {
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder(isolate);
@@ -164,7 +142,7 @@ static int DecodeIt(Isolate* isolate,
// Comments.
for (int i = 0; i < comments.length(); i++) {
out.AddFormatted(" %s", comments[i]);
- DumpBuffer(f, &out);
+ DumpBuffer(os, &out);
}
// Instruction address and instruction offset.
@@ -184,7 +162,7 @@ static int DecodeIt(Isolate* isolate,
out.AddPadding(' ', kRelocInfoPosition - out.position());
} else {
// Additional reloc infos are printed on separate lines.
- DumpBuffer(f, &out);
+ DumpBuffer(os, &out);
out.AddPadding(' ', kRelocInfoPosition);
}
@@ -214,7 +192,8 @@ static int DecodeIt(Isolate* isolate,
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
if (kind == Code::LOAD_IC &&
- LoadIC::GetContextualMode(code->extra_ic_state()) == CONTEXTUAL) {
+ LoadICState::GetContextualMode(code->extra_ic_state()) ==
+ CONTEXTUAL) {
out.AddFormatted(" contextual,");
}
InlineCacheState ic_state = code->ic_state();
@@ -277,7 +256,7 @@ static int DecodeIt(Isolate* isolate,
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
}
- DumpBuffer(f, &out);
+ DumpBuffer(os, &out);
}
// Emit comments following the last instruction (if any).
@@ -286,7 +265,7 @@ static int DecodeIt(Isolate* isolate,
if (RelocInfo::IsComment(it->rinfo()->rmode())) {
out.AddFormatted(" %s",
reinterpret_cast<const char*>(it->rinfo()->data()));
- DumpBuffer(f, &out);
+ DumpBuffer(os, &out);
}
}
}
@@ -296,40 +275,19 @@ static int DecodeIt(Isolate* isolate,
}
-int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
- V8NameConverter defaultConverter(NULL);
- return DecodeIt(isolate, f, defaultConverter, begin, end);
-}
-
-
-// Called by Code::CodePrint.
-void Disassembler::Decode(FILE* f, Code* code) {
- Isolate* isolate = code->GetIsolate();
- int decode_size = code->is_crankshafted()
- ? static_cast<int>(code->safepoint_table_offset())
- : code->instruction_size();
- // If there might be a back edge table, stop before reaching it.
- if (code->kind() == Code::FUNCTION) {
- decode_size =
- Min(decode_size, static_cast<int>(code->back_edge_table_offset()));
- }
-
- byte* begin = code->instruction_start();
- byte* end = begin + decode_size;
+int Disassembler::Decode(Isolate* isolate, OStream* os, byte* begin, byte* end,
+ Code* code) {
V8NameConverter v8NameConverter(code);
- DecodeIt(isolate, f, v8NameConverter, begin, end);
+ return DecodeIt(isolate, os, v8NameConverter, begin, end);
}
#else // ENABLE_DISASSEMBLER
-void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
+int Disassembler::Decode(Isolate* isolate, OStream* os, byte* begin, byte* end,
+ Code* code) {
return 0;
}
-
-void Disassembler::Decode(FILE* f, Code* code) {}
-
#endif // ENABLE_DISASSEMBLER
} } // namespace v8::internal
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index f65f538579..9b53166f2b 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -12,22 +12,12 @@ namespace internal {
class Disassembler : public AllStatic {
public:
- // Print the bytes in the interval [begin, end) into f.
- static void Dump(FILE* f, byte* begin, byte* end);
-
// Decode instructions in the the interval [begin, end) and print the
- // code into f. Returns the number of bytes disassembled or 1 if no
+ // code into os. Returns the number of bytes disassembled or 1 if no
// instruction could be decoded.
- static int Decode(Isolate* isolate, FILE* f, byte* begin, byte* end);
-
- // Decode instructions in code.
- static void Decode(FILE* f, Code* code);
- private:
- // Decode instruction at pc and print disassembled instruction into f.
- // Returns the instruction length in bytes, or 1 if the instruction could
- // not be decoded. The number of characters written is written into
- // the out parameter char_count.
- static int Decode(FILE* f, byte* pc, int* char_count);
+ // the code object is used for name resolution and may be null.
+ static int Decode(Isolate* isolate, OStream* os, byte* begin, byte* end,
+ Code* code = NULL);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
index 7b4486728a..cb12628675 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/double.h
@@ -11,8 +11,8 @@ namespace v8 {
namespace internal {
// We assume that doubles and uint64_t have the same endianness.
-inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+inline uint64_t double_to_uint64(double d) { return bit_cast<uint64_t>(d); }
+inline double uint64_to_double(uint64_t d64) { return bit_cast<double>(d64); }
// Helper functions for doubles.
class Double {
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index b48a5dfe02..fb973411ec 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -87,6 +87,11 @@ inline bool IsDictionaryElementsKind(ElementsKind kind) {
}
+inline bool IsSloppyArgumentsElements(ElementsKind kind) {
+ return kind == SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+
inline bool IsExternalArrayElementsKind(ElementsKind kind) {
return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 945a9e7f6e..abb046725c 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -120,7 +120,7 @@ ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
-ElementsAccessor** ElementsAccessor::elements_accessors_;
+ElementsAccessor** ElementsAccessor::elements_accessors_ = NULL;
static bool HasKey(Handle<FixedArray> array, Handle<Object> key_handle) {
@@ -141,9 +141,9 @@ static bool HasKey(Handle<FixedArray> array, Handle<Object> key_handle) {
MUST_USE_RESULT
static MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
- return isolate->Throw<Object>(
- isolate->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR(isolate, NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)),
+ Object);
}
@@ -247,15 +247,18 @@ static void CopyDictionaryToObjectElements(
}
-static void CopyDoubleToObjectElements(Handle<FixedArrayBase> from_base,
+// NOTE: this method violates the handlified function signature convention:
+// raw pointer parameters in the function that allocates.
+// See ElementsAccessorBase::CopyElements() for details.
+static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
uint32_t from_start,
- Handle<FixedArrayBase> to_base,
- ElementsKind to_kind,
- uint32_t to_start,
+ FixedArrayBase* to_base,
+ ElementsKind to_kind, uint32_t to_start,
int raw_copy_size) {
DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
+ DisallowHeapAllocation no_allocation;
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from_base->length() - from_start,
@@ -268,7 +271,7 @@ static void CopyDoubleToObjectElements(Handle<FixedArrayBase> from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(*to_base)->data_start() + start,
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
@@ -276,9 +279,12 @@ static void CopyDoubleToObjectElements(Handle<FixedArrayBase> from_base,
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+
+ // From here on, the code below could actually allocate. Therefore the raw
+ // values are wrapped into handles.
Isolate* isolate = from_base->GetIsolate();
- Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
- Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
+ Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
+ Handle<FixedArray> to(FixedArray::cast(to_base), isolate);
for (int i = 0; i < copy_size; ++i) {
HandleScope scope(isolate);
if (IsFastSmiElementsKind(to_kind)) {
@@ -554,7 +560,7 @@ class ElementsAccessorBase : public ElementsAccessor {
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
- virtual ElementsKind kind() const V8_FINAL V8_OVERRIDE {
+ virtual ElementsKind kind() const FINAL OVERRIDE {
return ElementsTraits::Kind;
}
@@ -578,7 +584,7 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateContents(holder, length);
}
- virtual void Validate(Handle<JSObject> holder) V8_FINAL V8_OVERRIDE {
+ virtual void Validate(Handle<JSObject> holder) FINAL OVERRIDE {
DisallowHeapAllocation no_gc;
ElementsAccessorSubclass::ValidateImpl(holder);
}
@@ -595,7 +601,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
- Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+ Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
return ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, backing_store);
}
@@ -604,7 +610,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
- Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+ Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
FLAG_trace_js_array_abuse) {
CheckArrayAbuse(holder, "elements read", key);
@@ -635,7 +641,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
- Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+ Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
return ElementsAccessorSubclass::GetAttributesImpl(
receiver, holder, key, backing_store);
}
@@ -653,33 +659,11 @@ class ElementsAccessorBase : public ElementsAccessor {
? ABSENT : NONE;
}
- MUST_USE_RESULT virtual PropertyType GetType(
- Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
- return ElementsAccessorSubclass::GetTypeImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
- return NONEXISTENT;
- }
- return
- Handle<BackingStore>::cast(backing_store)->is_the_hole(key)
- ? NONEXISTENT : FIELD;
- }
-
MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
- Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+ Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
return ElementsAccessorSubclass::GetAccessorPairImpl(
receiver, holder, key, backing_store);
}
@@ -694,7 +678,7 @@ class ElementsAccessorBase : public ElementsAccessor {
MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
Handle<JSArray> array,
- Handle<Object> length) V8_FINAL V8_OVERRIDE {
+ Handle<Object> length) FINAL OVERRIDE {
return ElementsAccessorSubclass::SetLengthImpl(
array, length, handle(array->elements()));
}
@@ -707,7 +691,7 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual void SetCapacityAndLength(
Handle<JSArray> array,
int capacity,
- int length) V8_FINAL V8_OVERRIDE {
+ int length) FINAL OVERRIDE {
ElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, capacity, length);
}
@@ -722,14 +706,11 @@ class ElementsAccessorBase : public ElementsAccessor {
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
Handle<JSObject> obj,
uint32_t key,
- JSReceiver::DeleteMode mode) V8_OVERRIDE = 0;
-
- static void CopyElementsImpl(Handle<FixedArrayBase> from,
- uint32_t from_start,
- Handle<FixedArrayBase> to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
+ JSReceiver::DeleteMode mode) OVERRIDE = 0;
+
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
int copy_size) {
UNREACHABLE();
}
@@ -740,11 +721,17 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsKind from_kind,
Handle<FixedArrayBase> to,
uint32_t to_start,
- int copy_size) V8_FINAL V8_OVERRIDE {
+ int copy_size) FINAL OVERRIDE {
DCHECK(!from.is_null());
- ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, from_kind, to_start, kPackedSizeNotKnown,
- copy_size);
+ // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
+ // violate the handlified function signature convention:
+ // raw pointer parameters in the function that allocates. This is done
+ // intentionally to avoid ArrayConcat() builtin performance degradation.
+ // See the comment in another ElementsAccessorBase::CopyElements() for
+ // details.
+ ElementsAccessorSubclass::CopyElementsImpl(*from, from_start, *to,
+ from_kind, to_start,
+ kPackedSizeNotKnown, copy_size);
}
virtual void CopyElements(
@@ -753,7 +740,7 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsKind from_kind,
Handle<FixedArrayBase> to,
uint32_t to_start,
- int copy_size) V8_FINAL V8_OVERRIDE {
+ int copy_size) FINAL OVERRIDE {
int packed_size = kPackedSizeNotKnown;
bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
@@ -764,16 +751,25 @@ class ElementsAccessorBase : public ElementsAccessor {
packed_size = copy_size;
}
}
- Handle<FixedArrayBase> from(from_holder->elements());
+ FixedArrayBase* from = from_holder->elements();
+ // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
+ // violate the handlified function signature convention:
+ // raw pointer parameters in the function that allocates. This is done
+ // intentionally to avoid ArrayConcat() builtin performance degradation.
+ //
+ // Details: The idea is that allocations actually happen only in case of
+ // copying from object with fast double elements to object with object
+ // elements. In all the other cases there are no allocations performed and
+ // handle creation causes noticeable performance degradation of the builtin.
ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, from_kind, to_start, packed_size, copy_size);
+ from, from_start, *to, from_kind, to_start, packed_size, copy_size);
}
virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
Handle<Object> receiver,
Handle<JSObject> holder,
Handle<FixedArray> to,
- Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE {
+ Handle<FixedArrayBase> from) FINAL OVERRIDE {
int len0 = to->length();
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -852,7 +848,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store)
- V8_FINAL V8_OVERRIDE {
+ FINAL OVERRIDE {
return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
}
@@ -862,7 +858,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
- uint32_t index) V8_FINAL V8_OVERRIDE {
+ uint32_t index) FINAL OVERRIDE {
return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
}
@@ -999,7 +995,7 @@ class FastElementsAccessor
virtual MaybeHandle<Object> Delete(
Handle<JSObject> obj,
uint32_t key,
- JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ JSReceiver::DeleteMode mode) FINAL OVERRIDE {
return DeleteCommon(obj, key, mode);
}
@@ -1040,7 +1036,7 @@ class FastElementsAccessor
};
-static inline ElementsKind ElementsKindForArray(Handle<FixedArrayBase> array) {
+static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
switch (array->map()->instance_type()) {
case FIXED_ARRAY_TYPE:
if (array->IsDictionary()) {
@@ -1076,38 +1072,42 @@ class FastSmiOrObjectElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
- static void CopyElementsImpl(Handle<FixedArrayBase> from,
- uint32_t from_start,
- Handle<FixedArrayBase> to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
+ // NOTE: this method violates the handlified function signature convention:
+ // raw pointer parameters in the function that allocates.
+ // See ElementsAccessor::CopyElements() for details.
+ // This method could actually allocate if copying from double elements to
+ // object elements.
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
int copy_size) {
+ DisallowHeapAllocation no_gc;
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- CopyObjectToObjectElements(*from, from_kind, from_start, *to, to_kind,
+ CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
to_start, copy_size);
break;
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ AllowHeapAllocation allow_allocation;
CopyDoubleToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
break;
+ }
case DICTIONARY_ELEMENTS:
- CopyDictionaryToObjectElements(*from, from_start, *to, to_kind,
- to_start, copy_size);
+ CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
+ copy_size);
break;
case SLOPPY_ARGUMENTS_ELEMENTS: {
// TODO(verwaest): This is a temporary hack to support extending
// SLOPPY_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
// This case should be UNREACHABLE().
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(from);
- Handle<FixedArrayBase> arguments(
- FixedArrayBase::cast(parameter_map->get(1)));
+ FixedArray* parameter_map = FixedArray::cast(from);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
ElementsKind from_kind = ElementsKindForArray(arguments);
CopyElementsImpl(arguments, from_start, to, from_kind,
to_start, packed_size, copy_size);
@@ -1201,31 +1201,29 @@ class FastDoubleElementsAccessor
}
protected:
- static void CopyElementsImpl(Handle<FixedArrayBase> from,
- uint32_t from_start,
- Handle<FixedArrayBase> to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
int copy_size) {
+ DisallowHeapAllocation no_allocation;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
- CopyPackedSmiToDoubleElements(*from, from_start, *to, to_start,
+ CopyPackedSmiToDoubleElements(from, from_start, to, to_start,
packed_size, copy_size);
break;
case FAST_HOLEY_SMI_ELEMENTS:
- CopySmiToDoubleElements(*from, from_start, *to, to_start, copy_size);
+ CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDoubleToDoubleElements(*from, from_start, *to, to_start, copy_size);
+ CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
break;
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- CopyObjectToDoubleElements(*from, from_start, *to, to_start, copy_size);
+ CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
break;
case DICTIONARY_ELEMENTS:
- CopyDictionaryToDoubleElements(*from, from_start, *to, to_start,
+ CopyDictionaryToDoubleElements(from, from_start, to, to_start,
copy_size);
break;
case SLOPPY_ARGUMENTS_ELEMENTS:
@@ -1310,16 +1308,6 @@ class TypedElementsAccessor
? NONE : ABSENT;
}
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- return
- key < AccessorClass::GetCapacityImpl(backing_store)
- ? FIELD : NONEXISTENT;
- }
-
MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
Handle<JSObject> obj,
Handle<Object> length,
@@ -1332,7 +1320,7 @@ class TypedElementsAccessor
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
Handle<JSObject> obj,
uint32_t key,
- JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ JSReceiver::DeleteMode mode) FINAL OVERRIDE {
// External arrays always ignore deletes.
return obj->GetIsolate()->factory()->true_value();
}
@@ -1396,7 +1384,7 @@ class DictionaryElementsAccessor
uint32_t number = static_cast<uint32_t>(key->Number());
if (new_length <= number && number < old_length) {
PropertyDetails details = dict->DetailsAt(i);
- if (details.IsDontDelete()) new_length = number + 1;
+ if (!details.IsConfigurable()) new_length = number + 1;
}
}
}
@@ -1453,10 +1441,9 @@ class DictionaryElementsAccessor
// Deleting a non-configurable property in strict mode.
Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
Handle<Object> args[2] = { name, obj };
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("strict_delete_property",
+ HandleVector(args, 2)),
+ Object);
}
return isolate->factory()->false_value();
}
@@ -1472,12 +1459,9 @@ class DictionaryElementsAccessor
return isolate->factory()->true_value();
}
- static void CopyElementsImpl(Handle<FixedArrayBase> from,
- uint32_t from_start,
- Handle<FixedArrayBase> to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
int copy_size) {
UNREACHABLE();
}
@@ -1490,7 +1474,7 @@ class DictionaryElementsAccessor
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
Handle<JSObject> obj,
uint32_t key,
- JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ JSReceiver::DeleteMode mode) FINAL OVERRIDE {
return DeleteCommon(obj, key, mode);
}
@@ -1530,20 +1514,6 @@ class DictionaryElementsAccessor
return ABSENT;
}
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> store) {
- Handle<SeededNumberDictionary> backing_store =
- Handle<SeededNumberDictionary>::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- return backing_store->DetailsAt(entry).type();
- }
- return NONEXISTENT;
- }
-
MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
Handle<Object> receiver,
Handle<JSObject> obj,
@@ -1648,23 +1618,6 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> parameters) {
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
- Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return FIELD;
- } else {
- // If not aliased, check the arguments.
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- return ElementsAccessor::ForArray(arguments)->GetType(
- receiver, obj, key, arguments);
- }
- }
-
MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
Handle<Object> receiver,
Handle<JSObject> obj,
@@ -1695,7 +1648,7 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
Handle<JSObject> obj,
uint32_t key,
- JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ JSReceiver::DeleteMode mode) FINAL OVERRIDE {
Isolate* isolate = obj->GetIsolate();
Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
@@ -1718,12 +1671,9 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
return isolate->factory()->true_value();
}
- static void CopyElementsImpl(Handle<FixedArrayBase> from,
- uint32_t from_start,
- Handle<FixedArrayBase> to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
int copy_size) {
UNREACHABLE();
}
@@ -1779,7 +1729,7 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
ElementsAccessor* ElementsAccessor::ForArray(Handle<FixedArrayBase> array) {
- return elements_accessors_[ElementsKindForArray(array)];
+ return elements_accessors_[ElementsKindForArray(*array)];
}
@@ -1798,6 +1748,7 @@ void ElementsAccessor::InitializeOncePerProcess() {
void ElementsAccessor::TearDown() {
+ if (elements_accessors_ == NULL) return;
#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
ELEMENTS_LIST(ACCESSOR_DELETE)
#undef ACCESSOR_DELETE
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 3496a644aa..f4de4bb010 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -81,24 +81,6 @@ class ElementsAccessor {
return GetAttributes(receiver, holder, key, handle(holder->elements()));
}
- // Returns an element's type, or NONEXISTENT if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual PropertyType GetType(
- Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) = 0;
-
- MUST_USE_RESULT inline PropertyType GetType(
- Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key) {
- return GetType(receiver, holder, key, handle(holder->elements()));
- }
-
// Returns an element's accessors, or NULL if the element does not exist or
// is plain. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
@@ -164,9 +146,10 @@ class ElementsAccessor {
uint32_t destination_start,
int copy_size) = 0;
- // TODO(ishell): Keeping |source_holder| parameter in a non-handlified form
- // helps avoiding ArrayConcat() builtin performance degradation.
- // Revisit this later.
+ // NOTE: this method violates the handlified function signature convention:
+ // raw pointer parameter |source_holder| in the function that allocates.
+ // This is done intentionally to avoid ArrayConcat() builtin performance
+ // degradation.
virtual void CopyElements(
JSObject* source_holder,
uint32_t source_start,
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index f146c3031e..7aa4f3341d 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -150,40 +150,43 @@ MaybeHandle<Object> Execution::New(Handle<JSFunction> func,
MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
+ Handle<Object> receiver, int argc,
Handle<Object> args[],
- Handle<Object>* exception_out) {
+ MaybeHandle<Object>* exception_out) {
+ bool is_termination = false;
+ Isolate* isolate = func->GetIsolate();
+ MaybeHandle<Object> maybe_result;
+ if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
// creating message objects during stack overflow we shouldn't
// capture messages.
- v8::TryCatch catcher;
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
-
- // Get isolate now, because handle might be persistent
- // and get destroyed in the next call.
- Isolate* isolate = func->GetIsolate();
- MaybeHandle<Object> maybe_result = Invoke(false, func, receiver, argc, args);
-
- if (maybe_result.is_null()) {
- DCHECK(catcher.HasCaught());
- DCHECK(isolate->has_pending_exception());
- DCHECK(isolate->external_caught_exception());
- if (exception_out != NULL) {
- if (isolate->pending_exception() ==
- isolate->heap()->termination_exception()) {
- *exception_out = isolate->factory()->termination_exception();
- } else {
- *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
+ {
+ v8::TryCatch catcher;
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
+
+ maybe_result = Invoke(false, func, receiver, argc, args);
+
+ if (maybe_result.is_null()) {
+ DCHECK(catcher.HasCaught());
+ DCHECK(isolate->has_pending_exception());
+ DCHECK(isolate->external_caught_exception());
+ if (exception_out != NULL) {
+ if (isolate->pending_exception() ==
+ isolate->heap()->termination_exception()) {
+ is_termination = true;
+ } else {
+ *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
+ }
}
+ isolate->OptionalRescheduleException(true);
}
- isolate->OptionalRescheduleException(true);
- }
- DCHECK(!isolate->has_pending_exception());
- DCHECK(!isolate->external_caught_exception());
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(!isolate->external_caught_exception());
+ }
+ if (is_termination) isolate->TerminateExecution();
return maybe_result;
}
@@ -236,10 +239,9 @@ MaybeHandle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
// If the Object doesn't have an instance-call handler we should
// throw a non-callable exception.
- i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
- "called_non_callable", i::HandleVector<i::Object>(&object, 1));
-
- return isolate->Throw<Object>(error_obj);
+ THROW_NEW_ERROR(isolate, NewTypeError("called_non_callable",
+ i::HandleVector<i::Object>(&object, 1)),
+ Object);
}
@@ -293,9 +295,9 @@ MaybeHandle<Object> Execution::TryGetConstructorDelegate(
// If the Object doesn't have an instance-call handler we should
// throw a non-callable exception.
- i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
- "called_non_callable", i::HandleVector<i::Object>(&object, 1));
- return isolate->Throw<Object>(error_obj);
+ THROW_NEW_ERROR(isolate, NewTypeError("called_non_callable",
+ i::HandleVector<i::Object>(&object, 1)),
+ Object);
}
@@ -484,7 +486,7 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
return Call(isolate, \
isolate->name##_fun(), \
isolate->js_builtins_object(), \
- ARRAY_SIZE(argv), argv); \
+ arraysize(argv), argv); \
} while (false)
@@ -575,7 +577,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
Handle<Object> result;
if (!TryCall(Handle<JSFunction>::cast(char_at),
string,
- ARRAY_SIZE(index_arg),
+ arraysize(index_arg),
index_arg).ToHandle(&result)) {
return factory->undefined_value();
}
@@ -602,7 +604,7 @@ MaybeHandle<JSFunction> Execution::InstantiateFunction(
Call(isolate,
isolate->instantiate_fun(),
isolate->js_builtins_object(),
- ARRAY_SIZE(args),
+ arraysize(args),
args),
JSFunction);
return Handle<JSFunction>::cast(result);
@@ -629,7 +631,7 @@ MaybeHandle<JSObject> Execution::InstantiateObject(
Call(isolate,
isolate->instantiate_fun(),
isolate->js_builtins_object(),
- ARRAY_SIZE(args),
+ arraysize(args),
args),
JSObject);
}
@@ -645,7 +647,7 @@ MaybeHandle<Object> Execution::ConfigureInstance(
return Execution::Call(isolate,
isolate->configure_instance_fun(),
isolate->js_builtins_object(),
- ARRAY_SIZE(args),
+ arraysize(args),
args);
}
@@ -659,7 +661,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
MaybeHandle<Object> maybe_result =
TryCall(isolate->get_stack_trace_line_fun(),
isolate->js_builtins_object(),
- ARRAY_SIZE(args),
+ arraysize(args),
args);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 2a41bb8ba0..89175cd906 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-class Execution V8_FINAL : public AllStatic {
+class Execution FINAL : public AllStatic {
public:
// Call a function, the caller supplies a receiver and an array
// of arguments. Arguments are Object* type. After function returns,
@@ -46,12 +46,12 @@ class Execution V8_FINAL : public AllStatic {
// any thrown exceptions. The return value is either the result of
// calling the function (if caught exception is false) or the exception
// that occurred (if caught exception is true).
- static MaybeHandle<Object> TryCall(
- Handle<JSFunction> func,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- Handle<Object>* exception_out = NULL);
+ // In the exception case, exception_out holds the caught exceptions, unless
+ // it is a termination exception.
+ static MaybeHandle<Object> TryCall(Handle<JSFunction> func,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[],
+ MaybeHandle<Object>* exception_out = NULL);
// ECMA-262 9.3
MUST_USE_RESULT static MaybeHandle<Object> ToNumber(
@@ -128,7 +128,7 @@ class PostponeInterruptsScope;
// StackGuard contains the handling of the limits that are used to limit the
// number of nested invocations of JavaScript and the stack size used in each
// invocation.
-class StackGuard V8_FINAL {
+class StackGuard FINAL {
public:
// Pass the address beyond which the stack should not grow. The stack
// is assumed to grow downwards.
@@ -233,7 +233,7 @@ class StackGuard V8_FINAL {
void PushPostponeInterruptsScope(PostponeInterruptsScope* scope);
void PopPostponeInterruptsScope();
- class ThreadLocal V8_FINAL {
+ class ThreadLocal FINAL {
public:
ThreadLocal() { Clear(); }
// You should hold the ExecutionAccess lock when you call Initialize or
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 3f31249c54..8d38dfa0f0 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -27,15 +27,15 @@ class SimpleStringResource : public Base {
};
-typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
- SimpleAsciiStringResource;
+typedef SimpleStringResource<char, v8::String::ExternalOneByteStringResource>
+ SimpleOneByteStringResource;
typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
SimpleTwoByteStringResource;
const char* const ExternalizeStringExtension::kSource =
"native function externalizeString();"
- "native function isAsciiString();";
+ "native function isOneByteString();";
v8::Handle<v8::FunctionTemplate>
ExternalizeStringExtension::GetNativeFunctionTemplate(
@@ -44,9 +44,9 @@ ExternalizeStringExtension::GetNativeFunctionTemplate(
return v8::FunctionTemplate::New(isolate,
ExternalizeStringExtension::Externalize);
} else {
- DCHECK(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
+ DCHECK(strcmp(*v8::String::Utf8Value(str), "isOneByteString") == 0);
return v8::FunctionTemplate::New(isolate,
- ExternalizeStringExtension::IsAscii);
+ ExternalizeStringExtension::IsOneByte);
}
}
@@ -81,7 +81,7 @@ void ExternalizeStringExtension::Externalize(
if (string->IsOneByteRepresentation() && !force_two_byte) {
uint8_t* data = new uint8_t[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
- SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
+ SimpleOneByteStringResource* resource = new SimpleOneByteStringResource(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
if (result) {
@@ -109,12 +109,12 @@ void ExternalizeStringExtension::Externalize(
}
-void ExternalizeStringExtension::IsAscii(
+void ExternalizeStringExtension::IsOneByte(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
args.GetIsolate(),
- "isAsciiString() requires a single string argument."));
+ "isOneByteString() requires a single string argument."));
return;
}
bool is_one_byte =
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index 74b5665ef0..f8c54f8f12 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -17,7 +17,7 @@ class ExternalizeStringExtension : public v8::Extension {
v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void IsOneByte(const v8::FunctionCallbackInfo<v8::Value>& args);
private:
static const char* const kSource;
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 39e32806b7..0adc8730f5 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -5,6 +5,7 @@
#include "src/factory.h"
#include "src/allocation-site-scopes.h"
+#include "src/base/bits.h"
#include "src/conversions.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
@@ -186,7 +187,7 @@ Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
Handle<String> Factory::InternalizeOneByteString(
Handle<SeqOneByteString> string, int from, int length) {
- SubStringKey<uint8_t> key(string, from, length);
+ SeqOneByteSubStringKey key(string, from, length);
return InternalizeStringWithKey(&key);
}
@@ -203,12 +204,6 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
}
-template Handle<String> Factory::InternalizeStringWithKey<
- SubStringKey<uint8_t> > (SubStringKey<uint8_t>* key);
-template Handle<String> Factory::InternalizeStringWithKey<
- SubStringKey<uint16_t> > (SubStringKey<uint16_t>* key);
-
-
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
@@ -253,7 +248,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
isolate(), result,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure),
String);
- // Copy ascii portion.
+ // Copy ASCII portion.
uint16_t* data = result->GetChars();
const char* ascii_data = string.start();
for (int i = 0; i < non_ascii_start; i++) {
@@ -313,6 +308,17 @@ MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedString(
}
+MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedSubString(
+ Handle<SeqOneByteString> string, int offset, int length,
+ uint32_t hash_field) {
+ CALL_HEAP_FUNCTION(
+ isolate(), isolate()->heap()->AllocateOneByteInternalizedString(
+ Vector<const uint8_t>(string->GetChars() + offset, length),
+ hash_field),
+ String);
+}
+
+
MUST_USE_RESULT Handle<String> Factory::NewTwoByteInternalizedString(
Vector<const uc16> str,
uint32_t hash_field) {
@@ -341,16 +347,17 @@ MaybeHandle<Map> Factory::InternalizedStringMapForString(
// Find the corresponding internalized string map for strings.
switch (string->map()->instance_type()) {
case STRING_TYPE: return internalized_string_map();
- case ASCII_STRING_TYPE: return ascii_internalized_string_map();
+ case ONE_BYTE_STRING_TYPE:
+ return one_byte_internalized_string_map();
case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
- case EXTERNAL_ASCII_STRING_TYPE:
- return external_ascii_internalized_string_map();
+ case EXTERNAL_ONE_BYTE_STRING_TYPE:
+ return external_one_byte_internalized_string_map();
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
return external_internalized_string_with_one_byte_data_map();
case SHORT_EXTERNAL_STRING_TYPE:
return short_external_internalized_string_map();
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- return short_external_ascii_internalized_string_map();
+ case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ return short_external_one_byte_internalized_string_map();
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
return short_external_internalized_string_with_one_byte_data_map();
default: return MaybeHandle<Map>(); // No match found.
@@ -361,7 +368,7 @@ MaybeHandle<Map> Factory::InternalizedStringMapForString(
MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
int length, PretenureFlag pretenure) {
if (length > String::kMaxLength || length < 0) {
- return isolate()->Throw<SeqOneByteString>(NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
}
CALL_HEAP_FUNCTION(
isolate(),
@@ -373,7 +380,7 @@ MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
int length, PretenureFlag pretenure) {
if (length > String::kMaxLength || length < 0) {
- return isolate()->Throw<SeqTwoByteString>(NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
}
CALL_HEAP_FUNCTION(
isolate(),
@@ -430,7 +437,8 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
// when building the new string.
if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
// We can do this.
- DCHECK(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU +
+ 1)); // because of this.
Handle<SeqOneByteString> str =
isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
uint8_t* dest = str->GetChars();
@@ -478,7 +486,7 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
- return isolate()->Throw<String>(NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
bool left_is_one_byte = left->IsOneByteRepresentation();
@@ -487,12 +495,12 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
bool is_one_byte_data_in_two_byte_string = false;
if (!is_one_byte) {
// At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short ASCII strings below, but
- // we can try to save memory if all chars actually fit in ASCII.
+ // can't use the fast case code for short one-byte strings below, but
+ // we can try to save memory if all chars actually fit in one-byte.
is_one_byte_data_in_two_byte_string =
left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
if (is_one_byte_data_in_two_byte_string) {
- isolate()->counters()->string_add_runtime_ext_to_ascii()->Increment();
+ isolate()->counters()->string_add_runtime_ext_to_one_byte()->Increment();
}
}
@@ -510,14 +518,15 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
DisallowHeapAllocation no_gc;
uint8_t* dest = result->GetChars();
// Copy left part.
- const uint8_t* src = left->IsExternalString()
- ? Handle<ExternalAsciiString>::cast(left)->GetChars()
- : Handle<SeqOneByteString>::cast(left)->GetChars();
+ const uint8_t* src =
+ left->IsExternalString()
+ ? Handle<ExternalOneByteString>::cast(left)->GetChars()
+ : Handle<SeqOneByteString>::cast(left)->GetChars();
for (int i = 0; i < left_length; i++) *dest++ = src[i];
// Copy right part.
src = right->IsExternalString()
- ? Handle<ExternalAsciiString>::cast(right)->GetChars()
- : Handle<SeqOneByteString>::cast(right)->GetChars();
+ ? Handle<ExternalOneByteString>::cast(right)->GetChars()
+ : Handle<SeqOneByteString>::cast(right)->GetChars();
for (int i = 0; i < right_length; i++) *dest++ = src[i];
return result;
}
@@ -530,7 +539,8 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
}
Handle<Map> map = (is_one_byte || is_one_byte_data_in_two_byte_string)
- ? cons_ascii_string_map() : cons_string_map();
+ ? cons_one_byte_string_map()
+ : cons_string_map();
Handle<ConsString> result = New<ConsString>(map, NEW_SPACE);
DisallowHeapAllocation no_gc;
@@ -595,8 +605,9 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
}
DCHECK(str->IsSeqString() || str->IsExternalString());
- Handle<Map> map = str->IsOneByteRepresentation() ? sliced_ascii_string_map()
- : sliced_string_map();
+ Handle<Map> map = str->IsOneByteRepresentation()
+ ? sliced_one_byte_string_map()
+ : sliced_string_map();
Handle<SlicedString> slice = New<SlicedString>(map, NEW_SPACE);
slice->set_hash_field(String::kEmptyHashField);
@@ -607,16 +618,16 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
}
-MaybeHandle<String> Factory::NewExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
+MaybeHandle<String> Factory::NewExternalStringFromOneByte(
+ const ExternalOneByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- return isolate()->Throw<String>(NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
- Handle<Map> map = external_ascii_string_map();
- Handle<ExternalAsciiString> external_string =
- New<ExternalAsciiString>(map, NEW_SPACE);
+ Handle<Map> map = external_one_byte_string_map();
+ Handle<ExternalOneByteString> external_string =
+ New<ExternalOneByteString>(map, NEW_SPACE);
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
@@ -629,7 +640,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- return isolate()->Throw<String>(NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
// For small strings we check whether the resource contains only
@@ -1045,59 +1056,58 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
}
-Handle<Object> Factory::NewTypeError(const char* message,
- Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewTypeError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeTypeError", message, args);
}
-Handle<Object> Factory::NewTypeError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewTypeError(Handle<String> message) {
return NewError("$TypeError", message);
}
-Handle<Object> Factory::NewRangeError(const char* message,
- Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewRangeError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeRangeError", message, args);
}
-Handle<Object> Factory::NewRangeError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewRangeError(Handle<String> message) {
return NewError("$RangeError", message);
}
-Handle<Object> Factory::NewSyntaxError(const char* message,
- Handle<JSArray> args) {
+MaybeHandle<Object> Factory::NewSyntaxError(const char* message,
+ Handle<JSArray> args) {
return NewError("MakeSyntaxError", message, args);
}
-Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewSyntaxError(Handle<String> message) {
return NewError("$SyntaxError", message);
}
-Handle<Object> Factory::NewReferenceError(const char* message,
- Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewReferenceError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeReferenceError", message, args);
}
-Handle<Object> Factory::NewReferenceError(const char* message,
- Handle<JSArray> args) {
+MaybeHandle<Object> Factory::NewReferenceError(const char* message,
+ Handle<JSArray> args) {
return NewError("MakeReferenceError", message, args);
}
-Handle<Object> Factory::NewReferenceError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewReferenceError(Handle<String> message) {
return NewError("$ReferenceError", message);
}
-Handle<Object> Factory::NewError(const char* maker,
- const char* message,
- Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
+ Vector<Handle<Object> > args) {
// Instantiate a closeable HandleScope for EscapeFrom.
v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
Handle<FixedArray> array = NewFixedArray(args.length());
@@ -1105,19 +1115,21 @@ Handle<Object> Factory::NewError(const char* maker,
array->set(i, *args[i]);
}
Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result = NewError(maker, message, object);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewError(maker, message, object), Object);
return result.EscapeFrom(&scope);
}
-Handle<Object> Factory::NewEvalError(const char* message,
- Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewEvalError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeEvalError", message, args);
}
-Handle<Object> Factory::NewError(const char* message,
- Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeError", message, args);
}
@@ -1134,7 +1146,7 @@ Handle<String> Factory::EmergencyNewError(const char* message,
space -= Min(space, strlen(message));
p = &buffer[kBufferSize] - space;
- for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
+ for (int i = 0; i < Smi::cast(args->length())->value(); i++) {
if (space > 0) {
*p++ = ' ';
space--;
@@ -1158,9 +1170,8 @@ Handle<String> Factory::EmergencyNewError(const char* message,
}
-Handle<Object> Factory::NewError(const char* maker,
- const char* message,
- Handle<JSArray> args) {
+MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
+ Handle<JSArray> args) {
Handle<String> make_str = InternalizeUtf8String(maker);
Handle<Object> fun_obj = Object::GetProperty(
isolate()->js_builtins_object(), make_str).ToHandleChecked();
@@ -1176,10 +1187,10 @@ Handle<Object> Factory::NewError(const char* maker,
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
Handle<Object> result;
- Handle<Object> exception;
+ MaybeHandle<Object> exception;
if (!Execution::TryCall(fun,
isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv,
&exception).ToHandle(&result)) {
return exception;
@@ -1188,13 +1199,13 @@ Handle<Object> Factory::NewError(const char* maker,
}
-Handle<Object> Factory::NewError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewError(Handle<String> message) {
return NewError("$Error", message);
}
-Handle<Object> Factory::NewError(const char* constructor,
- Handle<String> message) {
+MaybeHandle<Object> Factory::NewError(const char* constructor,
+ Handle<String> message) {
Handle<String> constr = InternalizeUtf8String(constructor);
Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
isolate()->js_builtins_object(), constr).ToHandleChecked());
@@ -1203,10 +1214,10 @@ Handle<Object> Factory::NewError(const char* constructor,
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
Handle<Object> result;
- Handle<Object> exception;
+ MaybeHandle<Object> exception;
if (!Execution::TryCall(fun,
isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv,
&exception).ToHandle(&result)) {
return exception;
@@ -1226,7 +1237,6 @@ void Factory::InitializeFunction(Handle<JSFunction> function,
function->set_prototype_or_initial_map(*the_hole_value());
function->set_literals_or_bindings(*empty_fixed_array());
function->set_next_function_link(*undefined_value());
- if (info->is_arrow()) function->RemovePrototype();
}
@@ -1292,8 +1302,9 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<JSFunction> function = NewFunction(
name, code, prototype, read_only_prototype);
- Handle<Map> initial_map = NewMap(
- type, instance_size, GetInitialFastElementsKind());
+ ElementsKind elements_kind =
+ type == JS_ARRAY_TYPE ? FAST_SMI_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
+ Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
if (prototype->IsTheHole() && !function->shared()->is_generator()) {
prototype = NewFunctionPrototype(function);
}
@@ -1345,8 +1356,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info,
Handle<Context> context,
PretenureFlag pretenure) {
- int map_index = Context::FunctionMapIndex(info->strict_mode(),
- info->is_generator());
+ int map_index = Context::FunctionMapIndex(info->strict_mode(), info->kind());
Handle<Map> map(Map::cast(context->native_context()->get(map_index)));
Handle<JSFunction> result = NewFunction(map, info, context, pretenure);
@@ -1772,20 +1782,19 @@ Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
}
-void Factory::ReinitializeJSReceiver(Handle<JSReceiver> object,
- InstanceType type,
- int size) {
- DCHECK(type >= FIRST_JS_OBJECT_TYPE);
+void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
+ int size) {
+ DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
// Allocate fresh map.
// TODO(rossberg): Once we optimize proxies, cache these maps.
Handle<Map> map = NewMap(type, size);
// Check that the receiver has at least the size of the fresh object.
- int size_difference = object->map()->instance_size() - map->instance_size();
+ int size_difference = proxy->map()->instance_size() - map->instance_size();
DCHECK(size_difference >= 0);
- map->set_prototype(object->map()->prototype());
+ map->set_prototype(proxy->map()->prototype());
// Allocate the backing storage for the properties.
int prop_size = map->InitialPropertiesLength();
@@ -1794,7 +1803,7 @@ void Factory::ReinitializeJSReceiver(Handle<JSReceiver> object,
Heap* heap = isolate()->heap();
MaybeHandle<SharedFunctionInfo> shared;
if (type == JS_FUNCTION_TYPE) {
- OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
+ OneByteStringKey key(STATIC_CHAR_VECTOR("<freezing call trap>"),
heap->HashSeed());
Handle<String> name = InternalizeStringWithKey(&key);
shared = NewSharedFunctionInfo(name, MaybeHandle<Code>());
@@ -1806,24 +1815,31 @@ void Factory::ReinitializeJSReceiver(Handle<JSReceiver> object,
// Put in filler if the new object is smaller than the old.
if (size_difference > 0) {
- Address address = object->address();
+ Address address = proxy->address();
heap->CreateFillerObjectAt(address + map->instance_size(), size_difference);
heap->AdjustLiveBytes(address, -size_difference, Heap::FROM_MUTATOR);
}
// Reset the map for the object.
- object->synchronized_set_map(*map);
- Handle<JSObject> jsobj = Handle<JSObject>::cast(object);
+ proxy->synchronized_set_map(*map);
+ Handle<JSObject> jsobj = Handle<JSObject>::cast(proxy);
// Reinitialize the object from the constructor map.
heap->InitializeJSObjectFromMap(*jsobj, *properties, *map);
+ // The current native context is used to set up certain bits.
+ // TODO(adamk): Using the current context seems wrong, it should be whatever
+ // context the JSProxy originated in. But that context isn't stored anywhere.
+ Handle<Context> context(isolate()->native_context());
+
// Functions require some minimal initialization.
if (type == JS_FUNCTION_TYPE) {
map->set_function_with_prototype(true);
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(object);
- Handle<Context> context(isolate()->native_context());
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(proxy);
InitializeFunction(js_function, shared.ToHandleChecked(), context);
+ } else {
+ // Provide JSObjects with a constructor.
+ map->set_constructor(context->object_function());
}
}
@@ -1861,39 +1877,42 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
}
-void Factory::BecomeJSObject(Handle<JSReceiver> object) {
- ReinitializeJSReceiver(object, JS_OBJECT_TYPE, JSObject::kHeaderSize);
+void Factory::BecomeJSObject(Handle<JSProxy> proxy) {
+ ReinitializeJSProxy(proxy, JS_OBJECT_TYPE, JSObject::kHeaderSize);
}
-void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
- ReinitializeJSReceiver(object, JS_FUNCTION_TYPE, JSFunction::kSize);
+void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
+ ReinitializeJSProxy(proxy, JS_FUNCTION_TYPE, JSFunction::kSize);
}
-Handle<FixedArray> Factory::NewTypeFeedbackVector(int slot_count) {
+Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(int slot_count) {
// Ensure we can skip the write barrier
DCHECK_EQ(isolate()->heap()->uninitialized_symbol(),
- *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+ *TypeFeedbackVector::UninitializedSentinel(isolate()));
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArrayWithFiller(
- slot_count,
- TENURED,
- *TypeFeedbackInfo::UninitializedSentinel(isolate())),
- FixedArray);
+ if (slot_count == 0) {
+ return Handle<TypeFeedbackVector>::cast(empty_fixed_array());
+ }
+
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFixedArrayWithFiller(
+ slot_count, TENURED,
+ *TypeFeedbackVector::UninitializedSentinel(isolate())),
+ TypeFeedbackVector);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, bool is_generator,
- bool is_arrow, Handle<Code> code, Handle<ScopeInfo> scope_info,
- Handle<FixedArray> feedback_vector) {
+ Handle<String> name, int number_of_literals, FunctionKind kind,
+ Handle<Code> code, Handle<ScopeInfo> scope_info,
+ Handle<TypeFeedbackVector> feedback_vector) {
+ DCHECK(IsValidFunctionKind(kind));
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name, code);
shared->set_scope_info(*scope_info);
shared->set_feedback_vector(*feedback_vector);
- shared->set_is_arrow(is_arrow);
+ shared->set_kind(kind);
int literals_array_size = number_of_literals;
// If the function contains object, regexp or array literals,
// allocate extra space for a literals array prefix containing the
@@ -1902,7 +1921,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
literals_array_size += JSFunction::kLiteralsPrefixSize;
}
shared->set_num_literals(literals_array_size);
- if (is_generator) {
+ if (IsGeneratorFunction(kind)) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
shared->DisableOptimization(kGenerator);
}
@@ -1956,7 +1975,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
- share->set_feedback_vector(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ Handle<TypeFeedbackVector> feedback_vector = NewTypeFeedbackVector(0);
+ share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
share->set_profiler_ticks(0);
share->set_ast_node_count(0);
share->set_counters(0);
@@ -2034,7 +2054,7 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
}
char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ Vector<char> buffer(arr, arraysize(arr));
const char* str;
if (number->IsSmi()) {
int num = Handle<Smi>::cast(number)->value();
@@ -2170,7 +2190,7 @@ Handle<JSFunction> Factory::CreateApiFunction(
#ifdef DEBUG
LookupIterator it(handle(JSObject::cast(result->prototype())),
constructor_string(),
- LookupIterator::CHECK_OWN_REAL);
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
DCHECK(it.IsFound());
DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
@@ -2309,9 +2329,12 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
Handle<MapCache>(MapCache::cast(context->map_cache()));
Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
if (result->IsMap()) return Handle<Map>::cast(result);
- // Create a new map and add it to the cache.
- Handle<Map> map = Map::Create(
- handle(context->object_function()), keys->length());
+ int length = keys->length();
+ // Create a new map and add it to the cache. Reuse the initial map of the
+ // Object function if the literal has no predeclared properties.
+ Handle<Map> map = length == 0
+ ? handle(context->object_function()->initial_map())
+ : Map::Create(isolate(), length);
AddToMapCache(context, keys, map);
return map;
}
@@ -2341,9 +2364,9 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kIrregexpASCIICodeIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpASCIICodeSavedIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
store->set(JSRegExp::kIrregexpCaptureCountIndex,
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index aa1f94d814..24b490c344 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -12,7 +12,7 @@ namespace internal {
// Interface for handle based allocation.
-class Factory V8_FINAL {
+class Factory FINAL {
public:
Handle<Oddball> NewOddball(Handle<Map> map,
const char* to_string,
@@ -85,34 +85,31 @@ class Factory V8_FINAL {
// allocated in the old generation. The pretenure flag defaults to
// DONT_TENURE.
//
- // Creates a new String object. There are two String encodings: ASCII and
- // two byte. One should choose between the three string factory functions
+ // Creates a new String object. There are two String encodings: one-byte and
+ // two-byte. One should choose between the three string factory functions
// based on the encoding of the string buffer that the string is
// initialized from.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and
- // the result will be ASCII encoded.
+ // - ...FromOneByte initializes the string from a buffer that is Latin1
+ // encoded (it does not check that the buffer is Latin1 encoded) and
+ // the result will be Latin1 encoded.
// - ...FromUtf8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will converted to two
- // byte.
- // - ...FromTwoByte initializes the string from a buffer that is two
- // byte encoded. If the characters are all single-byte characters,
- // the result will be converted to ASCII, otherwise it will be left as
- // two byte.
+ // encoded. If the characters are all ASCII characters, the result
+ // will be Latin1 encoded, otherwise it will converted to two-byte.
+ // - ...FromTwoByte initializes the string from a buffer that is two-byte
+ // encoded. If the characters are all Latin1 characters, the result
+ // will be converted to Latin1, otherwise it will be left as two-byte.
//
- // ASCII strings are pretenured when used as keys in the SourceCodeCache.
+ // One-byte strings are pretenured when used as keys in the SourceCodeCache.
MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
Vector<const uint8_t> str,
PretenureFlag pretenure = NOT_TENURED);
- template<size_t N>
- inline Handle<String> NewStringFromStaticAscii(
- const char (&str)[N],
- PretenureFlag pretenure = NOT_TENURED) {
+ template <size_t N>
+ inline Handle<String> NewStringFromStaticChars(
+ const char (&str)[N], PretenureFlag pretenure = NOT_TENURED) {
DCHECK(N == StrLength(str) + 1);
- return NewStringFromOneByte(
- STATIC_ASCII_VECTOR(str), pretenure).ToHandleChecked();
+ return NewStringFromOneByte(STATIC_CHAR_VECTOR(str), pretenure)
+ .ToHandleChecked();
}
inline Handle<String> NewStringFromAsciiChecked(
@@ -123,20 +120,19 @@ class Factory V8_FINAL {
}
- // Allocates and fully initializes a String. There are two String
- // encodings: ASCII and two byte. One should choose between the three string
+ // Allocates and fully initializes a String. There are two String encodings:
+ // one-byte and two-byte. One should choose between the threestring
// allocation functions based on the encoding of the string buffer used to
// initialized the string.
- // - ...FromAscii initializes the string from a buffer that is ASCII
- // encoded (it does not check that the buffer is ASCII encoded) and the
- // result will be ASCII encoded.
+ // - ...FromOneByte initializes the string from a buffer that is Latin1
+ // encoded (it does not check that the buffer is Latin1 encoded) and the
+ // result will be Latin1 encoded.
// - ...FromUTF8 initializes the string from a buffer that is UTF-8
- // encoded. If the characters are all single-byte characters, the
- // result will be ASCII encoded, otherwise it will converted to two
- // byte.
+ // encoded. If the characters are all ASCII characters, the result
+ // will be Latin1 encoded, otherwise it will converted to two-byte.
// - ...FromTwoByte initializes the string from a buffer that is two-byte
- // encoded. If the characters are all single-byte characters, the
- // result will be converted to ASCII, otherwise it will be left as
+ // encoded. If the characters are all Latin1 characters, the
+ // result will be converted to Latin1, otherwise it will be left as
// two-byte.
// TODO(dcarney): remove this function.
@@ -164,8 +160,11 @@ class Factory V8_FINAL {
uint32_t hash_field);
MUST_USE_RESULT Handle<String> NewOneByteInternalizedString(
- Vector<const uint8_t> str,
- uint32_t hash_field);
+ Vector<const uint8_t> str, uint32_t hash_field);
+
+ MUST_USE_RESULT Handle<String> NewOneByteInternalizedSubString(
+ Handle<SeqOneByteString> string, int offset, int length,
+ uint32_t hash_field);
MUST_USE_RESULT Handle<String> NewTwoByteInternalizedString(
Vector<const uc16> str,
@@ -179,7 +178,7 @@ class Factory V8_FINAL {
MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString(
Handle<String> string);
- // Allocates and partially initializes an ASCII or TwoByte String. The
+ // Allocates and partially initializes an one-byte or two-byte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
MUST_USE_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
@@ -190,7 +189,7 @@ class Factory V8_FINAL {
PretenureFlag pretenure = NOT_TENURED);
// Creates a single character string where the character has given code.
- // A cache is used for ASCII codes.
+ // A cache is used for Latin1 codes.
Handle<String> LookupSingleCharacterStringFromCode(uint32_t code);
// Create a new cons string object which consists of a pair of strings.
@@ -209,12 +208,12 @@ class Factory V8_FINAL {
}
// Creates a new external String object. There are two String encodings
- // in the system: ASCII and two byte. Unlike other String types, it does
+ // in the system: one-byte and two-byte. Unlike other String types, it does
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer. Note that these strings
// are backed by a string resource that resides outside the V8 heap.
- MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource);
+ MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromOneByte(
+ const ExternalOneByteString::Resource* resource);
MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
@@ -446,13 +445,6 @@ class Factory V8_FINAL {
Handle<Object> construct_trap,
Handle<Object> prototype);
- // Reinitialize a JSReceiver into an (empty) JS object of respective type and
- // size, but keeping the original prototype. The receiver must have at least
- // the size of the new object. The object is reinitialized and behaves as an
- // object that has been freshly allocated.
- void ReinitializeJSReceiver(
- Handle<JSReceiver> object, InstanceType type, int size);
-
// Reinitialize an JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
@@ -461,8 +453,8 @@ class Factory V8_FINAL {
Handle<JSFunction> constructor);
// Change the type of the argument into a JS object/function and reinitialize.
- void BecomeJSObject(Handle<JSReceiver> object);
- void BecomeJSFunction(Handle<JSReceiver> object);
+ void BecomeJSObject(Handle<JSProxy> object);
+ void BecomeJSFunction(Handle<JSProxy> object);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Code> code,
@@ -511,40 +503,40 @@ class Factory V8_FINAL {
// Interface for creating error objects.
- Handle<Object> NewError(const char* maker, const char* message,
- Handle<JSArray> args);
+ MaybeHandle<Object> NewError(const char* maker, const char* message,
+ Handle<JSArray> args);
Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
- Handle<Object> NewError(const char* maker, const char* message,
- Vector< Handle<Object> > args);
- Handle<Object> NewError(const char* message,
- Vector< Handle<Object> > args);
- Handle<Object> NewError(Handle<String> message);
- Handle<Object> NewError(const char* constructor,
- Handle<String> message);
-
- Handle<Object> NewTypeError(const char* message,
- Vector< Handle<Object> > args);
- Handle<Object> NewTypeError(Handle<String> message);
-
- Handle<Object> NewRangeError(const char* message,
- Vector< Handle<Object> > args);
- Handle<Object> NewRangeError(Handle<String> message);
-
- Handle<Object> NewInvalidStringLengthError() {
+ MaybeHandle<Object> NewError(const char* maker, const char* message,
+ Vector<Handle<Object> > args);
+ MaybeHandle<Object> NewError(const char* message,
+ Vector<Handle<Object> > args);
+ MaybeHandle<Object> NewError(Handle<String> message);
+ MaybeHandle<Object> NewError(const char* constructor, Handle<String> message);
+
+ MaybeHandle<Object> NewTypeError(const char* message,
+ Vector<Handle<Object> > args);
+ MaybeHandle<Object> NewTypeError(Handle<String> message);
+
+ MaybeHandle<Object> NewRangeError(const char* message,
+ Vector<Handle<Object> > args);
+ MaybeHandle<Object> NewRangeError(Handle<String> message);
+
+ MaybeHandle<Object> NewInvalidStringLengthError() {
return NewRangeError("invalid_string_length",
HandleVector<Object>(NULL, 0));
}
- Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
- Handle<Object> NewSyntaxError(Handle<String> message);
+ MaybeHandle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
+ MaybeHandle<Object> NewSyntaxError(Handle<String> message);
- Handle<Object> NewReferenceError(const char* message,
- Vector< Handle<Object> > args);
- Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
- Handle<Object> NewReferenceError(Handle<String> message);
+ MaybeHandle<Object> NewReferenceError(const char* message,
+ Vector<Handle<Object> > args);
+ MaybeHandle<Object> NewReferenceError(const char* message,
+ Handle<JSArray> args);
+ MaybeHandle<Object> NewReferenceError(Handle<String> message);
- Handle<Object> NewEvalError(const char* message,
- Vector< Handle<Object> > args);
+ MaybeHandle<Object> NewEvalError(const char* message,
+ Vector<Handle<Object> > args);
Handle<String> NumberToString(Handle<Object> number,
bool check_number_string_cache = true);
@@ -572,26 +564,26 @@ class Factory V8_FINAL {
MUST_USE_RESULT MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance);
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline Handle<type> name() { \
- return Handle<type>(BitCast<type**>( \
- &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline Handle<type> name() { \
+ return Handle<type>(bit_cast<type**>( \
+ &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- inline Handle<Map> name##_map() { \
- return Handle<Map>(BitCast<Map**>( \
- &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
- }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ inline Handle<Map> name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define STRING_ACCESSOR(name, str) \
- inline Handle<String> name() { \
- return Handle<String>(BitCast<String**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+#define STRING_ACCESSOR(name, str) \
+ inline Handle<String> name() { \
+ return Handle<String>(bit_cast<String**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
@@ -606,14 +598,14 @@ class Factory V8_FINAL {
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, bool is_generator,
- bool is_arrow, Handle<Code> code, Handle<ScopeInfo> scope_info,
- Handle<FixedArray> feedback_vector);
+ Handle<String> name, int number_of_literals, FunctionKind kind,
+ Handle<Code> code, Handle<ScopeInfo> scope_info,
+ Handle<TypeFeedbackVector> feedback_vector);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
MaybeHandle<Code> code);
// Allocate a new type feedback vector
- Handle<FixedArray> NewTypeFeedbackVector(int slot_count);
+ Handle<TypeFeedbackVector> NewTypeFeedbackVector(int slot_count);
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(
@@ -704,6 +696,12 @@ class Factory V8_FINAL {
Handle<JSFunction> NewFunction(Handle<Map> map,
Handle<String> name,
MaybeHandle<Code> maybe_code);
+
+ // Reinitialize a JSProxy into an (empty) JS object of respective type and
+ // size, but keeping the original prototype. The receiver must have at least
+ // the size of the new object. The object is reinitialized and behaves as an
+ // object that has been freshly allocated.
+ void ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type, int size);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 5508adb161..198422feef 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -110,6 +110,11 @@ inline FieldIndex FieldIndex::ForKeyedLookupCacheIndex(Map* map, int index) {
}
+inline FieldIndex FieldIndex::FromFieldAccessStubKey(int key) {
+ return FieldIndex(key);
+}
+
+
inline int FieldIndex::GetKeyedLookupCacheIndex() const {
if (FLAG_compiled_keyed_generic_loads) {
return GetLoadByFieldIndex();
diff --git a/deps/v8/src/field-index.cc b/deps/v8/src/field-index.cc
deleted file mode 100644
index 5392afc9f2..0000000000
--- a/deps/v8/src/field-index.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/field-index.h"
-#include "src/objects.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-FieldIndex FieldIndex::ForLookupResult(const LookupResult* lookup_result) {
- Map* map = lookup_result->holder()->map();
- return ForPropertyIndex(map,
- lookup_result->GetFieldIndexFromMap(map),
- lookup_result->representation().IsDouble());
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 8650c8fb8f..2558529070 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -17,16 +17,16 @@ class Map;
// from a property index. When available, the wrapper class captures additional
// information to allow the field index to be translated back into the property
// index it was originally generated from.
-class FieldIndex V8_FINAL {
+class FieldIndex FINAL {
public:
static FieldIndex ForPropertyIndex(Map* map,
int index,
bool is_double = false);
static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
- static FieldIndex ForLookupResult(const LookupResult* result);
static FieldIndex ForDescriptor(Map* map, int descriptor_index);
static FieldIndex ForLoadByFieldIndex(Map* map, int index);
static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
+ static FieldIndex FromFieldAccessStubKey(int key);
int GetLoadByFieldIndex() const;
@@ -83,6 +83,8 @@ class FieldIndex V8_FINAL {
InObjectPropertyBits::encode(inobject_properties);
}
+ explicit FieldIndex(int bit_field) : bit_field_(bit_field) {}
+
int first_inobject_property_offset() const {
DCHECK(!IsHiddenField::decode(bit_field_));
return FirstInobjectPropertyOffsetBits::decode(bit_field_);
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 55af2e6740..672f2b6bd6 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -10,6 +10,12 @@
// which can be included multiple times in different modes. It expects to have
// a mode defined before it's included. The modes are FLAG_MODE_... below:
+#define DEFINE_IMPLICATION(whenflag, thenflag) \
+ DEFINE_VALUE_IMPLICATION(whenflag, thenflag, true)
+
+#define DEFINE_NEG_IMPLICATION(whenflag, thenflag) \
+ DEFINE_VALUE_IMPLICATION(whenflag, thenflag, false)
+
// We want to declare the names of the variables for the header file. Normally
// this will just be an extern declaration, but for a readonly flag we let the
// compiler make better optimizations by giving it the value.
@@ -45,11 +51,8 @@
// We produce the code to set flags when it is implied by another flag.
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define DEFINE_IMPLICATION(whenflag, thenflag) \
- if (FLAG_##whenflag) FLAG_##thenflag = true;
-
-#define DEFINE_NEG_IMPLICATION(whenflag, thenflag) \
- if (FLAG_##whenflag) FLAG_##thenflag = false;
+#define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value) \
+ if (FLAG_##whenflag) FLAG_##thenflag = value;
#else
#error No mode supplied when including flags.defs
@@ -68,12 +71,8 @@
#define FLAG_ALIAS(ftype, ctype, alias, nam)
#endif
-#ifndef DEFINE_IMPLICATION
-#define DEFINE_IMPLICATION(whenflag, thenflag)
-#endif
-
-#ifndef DEFINE_NEG_IMPLICATION
-#define DEFINE_NEG_IMPLICATION(whenflag, thenflag)
+#ifndef DEFINE_VALUE_IMPLICATION
+#define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value)
#endif
#define COMMA ,
@@ -154,30 +153,35 @@ DEFINE_BOOL(harmony_scoping, false, "enable harmony block scoping")
DEFINE_BOOL(harmony_modules, false,
"enable harmony modules (implies block scoping)")
DEFINE_BOOL(harmony_proxies, false, "enable harmony proxies")
-DEFINE_BOOL(harmony_generators, false, "enable harmony generators")
DEFINE_BOOL(harmony_numeric_literals, false,
"enable harmony numeric literals (0o77, 0b11)")
DEFINE_BOOL(harmony_strings, false, "enable harmony string")
DEFINE_BOOL(harmony_arrays, false, "enable harmony arrays")
DEFINE_BOOL(harmony_arrow_functions, false, "enable harmony arrow functions")
+DEFINE_BOOL(harmony_classes, false, "enable harmony classes")
+DEFINE_BOOL(harmony_object_literals, false,
+ "enable harmony object literal extensions")
+DEFINE_BOOL(harmony_regexps, false, "enable regexp-related harmony features")
DEFINE_BOOL(harmony, false, "enable all harmony features (except proxies)")
DEFINE_IMPLICATION(harmony, harmony_scoping)
DEFINE_IMPLICATION(harmony, harmony_modules)
// TODO(rossberg): Reenable when problems are sorted out.
// DEFINE_IMPLICATION(harmony, harmony_proxies)
-DEFINE_IMPLICATION(harmony, harmony_generators)
DEFINE_IMPLICATION(harmony, harmony_numeric_literals)
DEFINE_IMPLICATION(harmony, harmony_strings)
DEFINE_IMPLICATION(harmony, harmony_arrays)
DEFINE_IMPLICATION(harmony, harmony_arrow_functions)
+DEFINE_IMPLICATION(harmony, harmony_classes)
+DEFINE_IMPLICATION(harmony, harmony_object_literals)
+DEFINE_IMPLICATION(harmony, harmony_regexps)
DEFINE_IMPLICATION(harmony_modules, harmony_scoping)
+DEFINE_IMPLICATION(harmony_classes, harmony_scoping)
+DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)
DEFINE_IMPLICATION(harmony, es_staging)
// Flags for experimental implementation features.
-DEFINE_BOOL(compiled_keyed_dictionary_loads, true,
- "use optimizing compiler to generate keyed dictionary load stubs")
DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
DEFINE_BOOL(clever_optimizations, true,
@@ -209,6 +213,8 @@ DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "
"speed.")
+DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
+
// Flags for data representation optimizations
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL(string_slices, true, "use string slices")
@@ -288,7 +294,6 @@ DEFINE_BOOL(trace_dead_code_elimination, false, "trace dead code elimination")
DEFINE_BOOL(unreachable_code_elimination, true, "eliminate unreachable code")
DEFINE_BOOL(trace_osr, false, "trace on-stack replacement")
DEFINE_INT(stress_runs, 0, "number of stress runs")
-DEFINE_BOOL(optimize_closures, true, "optimize closures")
DEFINE_BOOL(lookup_sample_by_shared, true,
"when picking a function to optimize, watch for shared function "
"info, not JSFunction itself")
@@ -328,14 +333,19 @@ DEFINE_STRING(turbo_filter, "~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_types, true, "trace generated TurboFan types")
DEFINE_BOOL(trace_turbo_scheduler, false, "trace generated TurboFan scheduler")
+DEFINE_BOOL(turbo_asm, false, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
-DEFINE_BOOL(turbo_types, false, "use typed lowering in TurboFan")
+DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
-DEFINE_BOOL(context_specialization, true,
+DEFINE_BOOL(context_specialization, false,
"enable context specialization in TurboFan")
DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan")
+DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
+DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
+DEFINE_IMPLICATION(turbo_inlining, turbo_types)
+DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -428,12 +438,11 @@ DEFINE_BOOL(trace_stub_failures, false,
"trace deoptimization of generated code stubs")
DEFINE_BOOL(serialize_toplevel, false, "enable caching of toplevel scripts")
+DEFINE_BOOL(trace_code_serializer, false, "trace code serializer")
// compiler.cc
DEFINE_INT(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
-DEFINE_BOOL(always_full_compiler, false,
- "try to use the dedicated run-once backend for all code")
DEFINE_INT(max_opt_count, 10,
"maximum number of optimization attempts before giving up.")
@@ -460,9 +469,7 @@ DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_BOOL(hard_abort, true, "abort by crashing")
// execution.cc
-// Slightly less than 1MB, since Windows' default stack size for
-// the main execution thread is 1MB for both 32 and 64-bit.
-DEFINE_INT(stack_size, 984,
+DEFINE_INT(stack_size, V8_DEFAULT_STACK_SIZE_KB,
"default size of stack region v8 is allowed to use (in kBytes)")
// frames.cc
@@ -491,6 +498,8 @@ DEFINE_BOOL(trace_gc_nvp, false,
"after each garbage collection")
DEFINE_BOOL(trace_gc_ignore_scavenger, false,
"do not print trace line after scavenger collection")
+DEFINE_BOOL(trace_idle_notification, false,
+ "print one trace line following each idle notification")
DEFINE_BOOL(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_BOOL(print_max_heap_committed, false,
@@ -522,12 +531,11 @@ DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
-DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely")
DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
DEFINE_INT(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
-DEFINE_BOOL(job_based_sweeping, false, "enable job based sweeping")
+DEFINE_BOOL(job_based_sweeping, true, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -658,6 +666,7 @@ DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
DEFINE_NEG_IMPLICATION(predictable, parallel_sweeping)
+DEFINE_NEG_IMPLICATION(predictable, job_based_sweeping)
//
@@ -918,6 +927,7 @@ DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
#undef DEFINE_ARGS
#undef DEFINE_IMPLICATION
#undef DEFINE_NEG_IMPLICATION
+#undef DEFINE_VALUE_IMPLICATION
#undef DEFINE_ALIAS_BOOL
#undef DEFINE_ALIAS_INT
#undef DEFINE_ALIAS_STRING
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index e892f805ef..f116fd2739 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/ast.h"
+#include "src/base/bits.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen.h"
@@ -931,9 +932,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
DCHECK(frames->length() == 0);
DCHECK(is_optimized());
- // Delegate to JS frame in absence of inlining.
- // TODO(turbofan): Revisit once we support inlining.
- if (LookupCode()->is_turbofanned()) {
+ // Delegate to JS frame in absence of turbofan deoptimization.
+ // TODO(turbofan): Revisit once we support deoptimization across the board.
+ if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
return JavaScriptFrame::Summarize(frames);
}
@@ -1058,9 +1059,9 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
int OptimizedFrame::GetInlineCount() {
DCHECK(is_optimized());
- // Delegate to JS frame in absence of inlining.
- // TODO(turbofan): Revisit once we support inlining.
- if (LookupCode()->is_turbofanned()) {
+ // Delegate to JS frame in absence of turbofan deoptimization.
+ // TODO(turbofan): Revisit once we support deoptimization across the board.
+ if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
return JavaScriptFrame::GetInlineCount();
}
@@ -1082,9 +1083,9 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
DCHECK(functions->length() == 0);
DCHECK(is_optimized());
- // Delegate to JS frame in absence of inlining.
- // TODO(turbofan): Revisit once we support inlining.
- if (LookupCode()->is_turbofanned()) {
+ // Delegate to JS frame in absence of turbofan deoptimization.
+ // TODO(turbofan): Revisit once we support deoptimization across the board.
+ if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
return JavaScriptFrame::GetFunctions(functions);
}
@@ -1500,7 +1501,7 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
- DCHECK(IsPowerOf2(kInnerPointerToCodeCacheSize));
+ DCHECK(base::bits::IsPowerOfTwo32(kInnerPointerToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
v8::internal::kZeroHashSeed);
@@ -1578,9 +1579,7 @@ int StackHandler::Rewind(Isolate* isolate,
// -------------------------------------------------------------------------
-int NumRegs(RegList reglist) {
- return CompilerIntrinsics::CountSetBits(reglist);
-}
+int NumRegs(RegList reglist) { return base::bits::CountPopulation32(reglist); }
struct JSCallerSavedCodeData {
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 0297f88f58..35d51d982e 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
@@ -14,7 +15,6 @@
#include "src/scopeinfo.h"
#include "src/scopes.h"
#include "src/snapshot.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -33,18 +33,22 @@ void BreakableStatementChecker::VisitVariableDeclaration(
VariableDeclaration* decl) {
}
+
void BreakableStatementChecker::VisitFunctionDeclaration(
FunctionDeclaration* decl) {
}
+
void BreakableStatementChecker::VisitModuleDeclaration(
ModuleDeclaration* decl) {
}
+
void BreakableStatementChecker::VisitImportDeclaration(
ImportDeclaration* decl) {
}
+
void BreakableStatementChecker::VisitExportDeclaration(
ExportDeclaration* decl) {
}
@@ -178,6 +182,13 @@ void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
}
+void BreakableStatementChecker::VisitClassLiteral(ClassLiteral* expr) {
+ if (expr->extends() != NULL) {
+ Visit(expr->extends());
+ }
+}
+
+
void BreakableStatementChecker::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
}
@@ -285,6 +296,9 @@ void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
}
+void BreakableStatementChecker::VisitSuperReference(SuperReference* expr) {}
+
+
#define __ ACCESS_MASM(masm())
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
@@ -406,14 +420,13 @@ void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
TypeFeedbackId id) {
- ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), contextual_mode).code();
CallIC(ic, id);
}
void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
+ Handle<Code> ic = CodeFactory::StoreIC(isolate(), strict_mode()).code();
CallIC(ic, id);
}
@@ -819,6 +832,11 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
}
+void FullCodeGenerator::VisitSuperReference(SuperReference* super) {
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+}
+
+
void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
if (!info_->is_debug()) {
CodeGenerator::RecordPositions(masm_, expr->position());
@@ -870,7 +888,7 @@ FullCodeGenerator::InlineFunctionGenerator
static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction);
DCHECK(lookup_index >= 0);
DCHECK(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
+ arraysize(kInlineFunctionGenerators));
return kInlineFunctionGenerators[lookup_index];
}
@@ -1463,6 +1481,8 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
__ DebugBreak();
// Ignore the return value.
+
+ PrepareForBailoutForId(stmt->DebugBreakId(), NO_REGISTERS);
}
@@ -1521,6 +1541,16 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
+void FullCodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+ // TODO(arv): Implement
+ Comment cmnt(masm_, "[ ClassLiteral");
+ if (expr->extends() != NULL) {
+ VisitForEffect(expr->extends());
+ }
+ context()->Plug(isolate()->factory()->undefined_value());
+}
+
+
void FullCodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
Comment cmnt(masm_, "[ NativeFunctionLiteral");
@@ -1537,13 +1567,11 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- bool is_generator = false;
- bool is_arrow = false;
Handle<SharedFunctionInfo> shared =
isolate()->factory()->NewSharedFunctionInfo(
- name, literals, is_generator, is_arrow, code,
+ name, literals, FunctionKind::kNormalFunction, code,
Handle<ScopeInfo>(fun->shared()->scope_info()),
- Handle<FixedArray>(fun->shared()->feedback_vector()));
+ Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
shared->set_construct_stub(*construct_stub);
// Copy the function data to the shared function info.
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 6814946529..fdb329324a 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -475,8 +475,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitReturnSequence();
// Platform-specific code sequences for calls
- void EmitCall(Call* expr, CallIC::CallType = CallIC::FUNCTION);
+ void EmitCall(Call* expr, CallICState::CallType = CallICState::FUNCTION);
void EmitCallWithLoadIC(Call* expr);
+ void EmitSuperCallWithLoadIC(Call* expr);
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
// Platform-specific code for inline runtime calls.
@@ -520,6 +521,10 @@ class FullCodeGenerator: public AstVisitor {
// The receiver is left on the stack by the IC.
void EmitNamedPropertyLoad(Property* expr);
+ // Load a value from super.named prroperty.
+ // Expect receiver ('this' value) and home_object on the stack.
+ void EmitNamedSuperPropertyLoad(Property* expr);
+
// Load a value from a keyed property.
// The receiver and the key is left on the stack by the IC.
void EmitKeyedPropertyLoad(Property* expr);
@@ -555,11 +560,17 @@ class FullCodeGenerator: public AstVisitor {
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
+ // Complete a super named property assignment. The right-hand-side value
+ // is expected in accumulator.
+ void EmitNamedSuperPropertyAssignment(Assignment* expr);
+
// Complete a keyed property assignment. The receiver and key are
// expected on top of the stack and the right-hand-side value in the
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
+ void EmitLoadHomeObject(SuperReference* expr);
+
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index c9a57b5fa0..776c662b01 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -5,6 +5,7 @@
#ifdef ENABLE_GDB_JIT_INTERFACE
#include "src/v8.h"
+#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
@@ -222,16 +223,11 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
};
- MachOSection(const char* name,
- const char* segment,
- uintptr_t align,
+ MachOSection(const char* name, const char* segment, uint32_t align,
uint32_t flags)
- : name_(name),
- segment_(segment),
- align_(align),
- flags_(flags) {
+ : name_(name), segment_(segment), align_(align), flags_(flags) {
if (align_ != 0) {
- DCHECK(IsPowerOf2(align));
+ DCHECK(base::bits::IsPowerOfTwo32(align));
align_ = WhichPowerOf2(align_);
}
}
@@ -259,7 +255,7 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
private:
const char* name_;
const char* segment_;
- uintptr_t align_;
+ uint32_t align_;
uint32_t flags_;
};
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
index c62fe2c771..72e64dce6e 100644
--- a/deps/v8/src/generator.js
+++ b/deps/v8/src/generator.js
@@ -20,6 +20,7 @@ function GeneratorObjectNext(value) {
['[Generator].prototype.next', this]);
}
+ if (DEBUG_IS_ACTIVE) %DebugPrepareStepInIfStepping(this);
return %_GeneratorNext(this, value);
}
@@ -47,9 +48,7 @@ function GeneratorFunctionConstructor(arg1) { // length == 1
var global_proxy = %GlobalProxy(global);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
- var f = %CompileString(source, true);
- if (!IS_FUNCTION(f)) return f;
- f = %_CallFunction(global_proxy, f);
+ var f = %_CallFunction(global_proxy, %CompileString(source, true));
%FunctionMarkNameShouldPrintAsAnonymous(f);
return f;
}
@@ -57,11 +56,19 @@ function GeneratorFunctionConstructor(arg1) { // length == 1
function SetUpGenerators() {
%CheckIsBootstrapping();
+
+ // Both Runtime_GeneratorNext and Runtime_GeneratorThrow are supported by
+ // neither Crankshaft nor TurboFan, disable optimization of wrappers here.
+ %NeverOptimizeFunction(GeneratorObjectNext);
+ %NeverOptimizeFunction(GeneratorObjectThrow);
+
+ // Set up non-enumerable functions on the generator prototype object.
var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
InstallFunctions(GeneratorObjectPrototype,
DONT_ENUM | DONT_DELETE | READ_ONLY,
["next", GeneratorObjectNext,
"throw", GeneratorObjectThrow]);
+
%FunctionSetName(GeneratorObjectIterator, '[Symbol.iterator]');
%AddNamedProperty(GeneratorObjectPrototype, symbolIterator,
GeneratorObjectIterator, DONT_ENUM | DONT_DELETE | READ_ONLY);
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 940d53bb15..282ca2d5d8 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -235,8 +235,8 @@ class GlobalHandles::Node {
{
// Check that we are not passing a finalized external string to
// the callback.
- DCHECK(!object_->IsExternalAsciiString() ||
- ExternalAsciiString::cast(object_)->resource() != NULL);
+ DCHECK(!object_->IsExternalOneByteString() ||
+ ExternalOneByteString::cast(object_)->resource() != NULL);
DCHECK(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
@@ -845,23 +845,6 @@ void GlobalHandles::SetRetainedObjectInfo(UniqueId id,
}
-void GlobalHandles::AddImplicitReferences(HeapObject** parent,
- Object*** children,
- size_t length) {
-#ifdef DEBUG
- DCHECK(!Node::FromLocation(BitCast<Object**>(parent))->is_independent());
- for (size_t i = 0; i < length; ++i) {
- DCHECK(!Node::FromLocation(children[i])->is_independent());
- }
-#endif
- if (length == 0) return;
- ImplicitRefGroup* group = new ImplicitRefGroup(parent, length);
- for (size_t i = 0; i < length; ++i)
- group->children[i] = children[i];
- implicit_ref_groups_.Add(group);
-}
-
-
void GlobalHandles::SetReferenceFromGroup(UniqueId id, Object** child) {
DCHECK(!Node::FromLocation(child)->is_independent());
implicit_ref_connections_.Add(ObjectGroupConnection(id, child));
@@ -1007,7 +990,7 @@ void GlobalHandles::ComputeObjectGroupsAndImplicitReferences() {
EternalHandles::EternalHandles() : size_(0) {
- for (unsigned i = 0; i < ARRAY_SIZE(singleton_handles_); i++) {
+ for (unsigned i = 0; i < arraysize(singleton_handles_); i++) {
singleton_handles_[i] = kInvalidIndex;
}
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index ff34821ce7..a06cba0856 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -215,13 +215,6 @@ class GlobalHandles {
// handles.
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
- // Add an implicit references' group.
- // Should be only used in GC callback function before a collection.
- // All groups are destroyed after a mark-compact collection.
- void AddImplicitReferences(HeapObject** parent,
- Object*** children,
- size_t length);
-
// Adds an implicit reference from a group to an object. Should be only used
// in GC callback function before a collection. All implicit references are
// destroyed after a mark-compact collection.
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 258707493e..609ab8871f 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -68,6 +68,18 @@ namespace internal {
// Determine whether the architecture uses an out-of-line constant pool.
#define V8_OOL_CONSTANT_POOL 0
+#ifdef V8_TARGET_ARCH_ARM
+// Set stack limit lower for ARM than for other architectures because
+// stack allocating MacroAssembler takes 120K bytes.
+// See issue crbug.com/405338
+#define V8_DEFAULT_STACK_SIZE_KB 864
+#else
+// Slightly less than 1MB, since Windows' default stack size for
+// the main execution thread is 1MB for both 32 and 64-bit.
+#define V8_DEFAULT_STACK_SIZE_KB 984
+#endif
+
+
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -610,8 +622,12 @@ enum CpuFeature {
MOVW_MOVT_IMMEDIATE_LOADS,
VFP32DREGS,
NEON,
- // MIPS
+ // MIPS, MIPS64
FPU,
+ FP64FPU,
+ MIPSr1,
+ MIPSr2,
+ MIPSr6,
// ARM64
ALWAYS_ALIGN_CSP,
NUMBER_OF_CPU_FEATURES
@@ -752,6 +768,44 @@ enum MinusZeroMode {
FAIL_ON_MINUS_ZERO
};
+
+enum Signedness { kSigned, kUnsigned };
+
+
+enum FunctionKind {
+ kNormalFunction = 0,
+ kArrowFunction = 1,
+ kGeneratorFunction = 2,
+ kConciseMethod = 4,
+ kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod
+};
+
+
+inline bool IsValidFunctionKind(FunctionKind kind) {
+ return kind == FunctionKind::kNormalFunction ||
+ kind == FunctionKind::kArrowFunction ||
+ kind == FunctionKind::kGeneratorFunction ||
+ kind == FunctionKind::kConciseMethod ||
+ kind == FunctionKind::kConciseGeneratorMethod;
+}
+
+
+inline bool IsArrowFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kArrowFunction;
+}
+
+
+inline bool IsGeneratorFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kGeneratorFunction;
+}
+
+
+inline bool IsConciseMethod(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kConciseMethod;
+}
} } // namespace v8::internal
namespace i = v8::internal;
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 65b78c5deb..34b3f32d96 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -41,7 +41,7 @@ inline bool Handle<T>::is_identical_to(const Handle<T> o) const {
template <typename T>
inline T* Handle<T>::operator*() const {
SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
- return *BitCast<T**>(location_);
+ return *bit_cast<T**>(location_);
}
template <typename T>
@@ -55,7 +55,7 @@ inline T** Handle<T>::location() const {
template <typename T>
bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
DCHECK(location_ != NULL);
- Object* object = *BitCast<T**>(location_);
+ Object* object = *bit_cast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
Heap* heap = heap_object->GetHeap();
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index dbcb292a08..88b878f0a7 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -123,11 +123,29 @@ function ArrayFill(value /* [, start [, end ] ] */) { // length == 1
return array;
}
+// ES6, draft 05-22-14, section 22.1.2.3
+function ArrayOf() {
+ var length = %_ArgumentsLength();
+ var constructor = this;
+ // TODO: Implement IsConstructor (ES6 section 7.2.5)
+ var array = IS_SPEC_FUNCTION(constructor) ? new constructor(length) : [];
+ for (var i = 0; i < length; i++) {
+ %AddElement(array, i, %_Arguments(i), NONE);
+ }
+ array.length = length;
+ return array;
+}
+
// -------------------------------------------------------------------
function HarmonyArrayExtendArrayPrototype() {
%CheckIsBootstrapping();
+ // Set up non-enumerable functions on the Array object.
+ InstallFunctions($Array, DONT_ENUM, $Array(
+ "of", ArrayOf
+ ));
+
// Set up the non-enumerable functions on the Array prototype object.
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
"find", ArrayFind,
diff --git a/deps/v8/src/harmony-classes.js b/deps/v8/src/harmony-classes.js
new file mode 100644
index 0000000000..b6605a902c
--- /dev/null
+++ b/deps/v8/src/harmony-classes.js
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Function = global.Function;
+// var $Array = global.Array;
+
+
+(function() {
+ function FunctionToMethod(homeObject) {
+ if (!IS_SPEC_FUNCTION(this)) {
+ throw MakeTypeError('toMethod_non_function',
+ [%ToString(this), typeof this]);
+
+ }
+
+ if (!IS_SPEC_OBJECT(homeObject)) {
+ throw MakeTypeError('toMethod_non_object',
+ [%ToString(homeObject)]);
+ }
+
+ return %ToMethod(this, homeObject);
+ }
+
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Function.prototype, DONT_ENUM, $Array(
+ "toMethod", FunctionToMethod
+ ));
+}());
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index 26dbd58473..33eb115258 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -6,6 +6,7 @@
#define V8_HASHMAP_H_
#include "src/allocation.h"
+#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/utils.h"
@@ -239,7 +240,7 @@ typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
DCHECK(key != NULL);
- DCHECK(IsPowerOf2(capacity_));
+ DCHECK(base::bits::IsPowerOfTwo32(capacity_));
Entry* p = map_ + (hash & (capacity_ - 1));
const Entry* end = map_end();
DCHECK(map_ <= p && p < end);
@@ -259,7 +260,7 @@ typename TemplateHashMapImpl<AllocationPolicy>::Entry*
template<class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Initialize(
uint32_t capacity, AllocationPolicy allocator) {
- DCHECK(IsPowerOf2(capacity));
+ DCHECK(base::bits::IsPowerOfTwo32(capacity));
map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
if (map_ == NULL) {
v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
diff --git a/deps/v8/src/heap-snapshot-generator-inl.h b/deps/v8/src/heap-snapshot-generator-inl.h
index f7d87aa31b..3f7e622166 100644
--- a/deps/v8/src/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/heap-snapshot-generator-inl.h
@@ -43,25 +43,6 @@ HeapGraphEdge** HeapEntry::children_arr() {
}
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
- return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
- return reinterpret_cast<HeapObject*>(
- reinterpret_cast<char*>(kFirstGcSubrootObject) +
- delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
- return static_cast<int>(
- (reinterpret_cast<char*>(subroot) -
- reinterpret_cast<char*>(kFirstGcSubrootObject)) /
- HeapObjectsMap::kObjectIdStep);
-}
-
} } // namespace v8::internal
#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index eff9f9a934..4a4c914e4e 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -188,7 +188,6 @@ HeapSnapshot::HeapSnapshot(HeapProfiler* profiler,
uid_(uid),
root_index_(HeapEntry::kNoEntry),
gc_roots_index_(HeapEntry::kNoEntry),
- natives_root_index_(HeapEntry::kNoEntry),
max_snapshot_js_object_id_(0) {
STATIC_ASSERT(
sizeof(HeapGraphEdge) ==
@@ -217,6 +216,18 @@ void HeapSnapshot::RememberLastJSObjectId() {
}
+void HeapSnapshot::AddSyntheticRootEntries() {
+ AddRootEntry();
+ AddGcRootsEntry();
+ SnapshotObjectId id = HeapObjectsMap::kGcRootsFirstSubrootId;
+ for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
+ AddGcSubrootEntry(tag, id);
+ id += HeapObjectsMap::kObjectIdStep;
+ }
+ DCHECK(HeapObjectsMap::kFirstAvailableObjectId == id);
+}
+
+
HeapEntry* HeapSnapshot::AddRootEntry() {
DCHECK(root_index_ == HeapEntry::kNoEntry);
DCHECK(entries_.is_empty()); // Root entry must be the first one.
@@ -243,15 +254,11 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
}
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) {
DCHECK(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(
- HeapEntry::kSynthetic,
- VisitorSynchronization::kTagNames[tag],
- HeapObjectsMap::GetNthGcSubrootId(tag),
- 0,
- 0);
+ HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
+ VisitorSynchronization::kTagNames[tag], id, 0, 0);
gc_subroot_indexes_[tag] = entry->index();
return entry;
}
@@ -771,20 +778,6 @@ void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
}
-HeapObject* const V8HeapExplorer::kInternalRootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
-
-
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
@@ -809,16 +802,7 @@ HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
- if (object == kInternalRootObject) {
- snapshot_->AddRootEntry();
- return snapshot_->root();
- } else if (object == kGcRootsObject) {
- HeapEntry* entry = snapshot_->AddGcRootsEntry();
- return entry;
- } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
- HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
- return entry;
- } else if (object->IsJSFunction()) {
+ if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
const char* name = shared->bound() ? "native_bind" :
@@ -965,41 +949,6 @@ class SnapshotFiller {
};
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
- GcSubrootsEnumerator(
- SnapshotFiller* filler, V8HeapExplorer* explorer)
- : filler_(filler),
- explorer_(explorer),
- previous_object_count_(0),
- object_count_(0) {
- }
- void VisitPointers(Object** start, Object** end) {
- object_count_ += end - start;
- }
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- // Skip empty subroots.
- if (previous_object_count_ != object_count_) {
- previous_object_count_ = object_count_;
- filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
- }
- }
- private:
- SnapshotFiller* filler_;
- V8HeapExplorer* explorer_;
- intptr_t previous_object_count_;
- intptr_t object_count_;
-};
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) {
- filler->AddEntry(kInternalRootObject, this);
- filler->AddEntry(kGcRootsObject, this);
- GcSubrootsEnumerator enumerator(filler, this);
- heap_->IterateRoots(&enumerator, VISIT_ALL);
-}
-
-
const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
switch (object->map()->instance_type()) {
case MAP_TYPE:
@@ -1118,6 +1067,8 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
} else if (obj->IsScript()) {
ExtractScriptReferences(entry, Script::cast(obj));
+ } else if (obj->IsAccessorInfo()) {
+ ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
} else if (obj->IsAccessorPair()) {
ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
} else if (obj->IsCodeCache()) {
@@ -1469,6 +1420,35 @@ void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
}
+void V8HeapExplorer::ExtractAccessorInfoReferences(
+ int entry, AccessorInfo* accessor_info) {
+ SetInternalReference(accessor_info, entry, "name", accessor_info->name(),
+ AccessorInfo::kNameOffset);
+ SetInternalReference(accessor_info, entry, "expected_receiver_type",
+ accessor_info->expected_receiver_type(),
+ AccessorInfo::kExpectedReceiverTypeOffset);
+ if (accessor_info->IsDeclaredAccessorInfo()) {
+ DeclaredAccessorInfo* declared_accessor_info =
+ DeclaredAccessorInfo::cast(accessor_info);
+ SetInternalReference(declared_accessor_info, entry, "descriptor",
+ declared_accessor_info->descriptor(),
+ DeclaredAccessorInfo::kDescriptorOffset);
+ } else if (accessor_info->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* executable_accessor_info =
+ ExecutableAccessorInfo::cast(accessor_info);
+ SetInternalReference(executable_accessor_info, entry, "getter",
+ executable_accessor_info->getter(),
+ ExecutableAccessorInfo::kGetterOffset);
+ SetInternalReference(executable_accessor_info, entry, "setter",
+ executable_accessor_info->setter(),
+ ExecutableAccessorInfo::kSetterOffset);
+ SetInternalReference(executable_accessor_info, entry, "data",
+ executable_accessor_info->data(),
+ ExecutableAccessorInfo::kDataOffset);
+ }
+}
+
+
void V8HeapExplorer::ExtractAccessorPairReferences(
int entry, AccessorPair* accessors) {
SetInternalReference(accessors, entry, "getter", accessors->getter(),
@@ -1697,10 +1677,6 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
descs->GetKey(i), descs->GetValue(i));
break;
case NORMAL: // only in slow mode
- case HANDLER: // only in lookup results, not in descriptors
- case INTERCEPTOR: // only in lookup results, not in descriptors
- break;
- case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -1786,25 +1762,8 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
if (object->IsJSFunction()) return heap->closure_string();
String* constructor_name = object->constructor_name();
if (constructor_name == heap->Object_string()) {
- // Look up an immediate "constructor" property, if it is a function,
- // return its name. This is for instances of binding objects, which
- // have prototype constructor type "Object".
- Object* constructor_prop = NULL;
- Isolate* isolate = heap->isolate();
- LookupResult result(isolate);
- object->LookupOwnRealNamedProperty(
- isolate->factory()->constructor_string(), &result);
- if (!result.IsFound()) return object->constructor_name();
-
- constructor_prop = result.GetLazyValue();
- if (constructor_prop->IsJSFunction()) {
- Object* maybe_name =
- JSFunction::cast(constructor_prop)->shared()->name();
- if (maybe_name->IsString()) {
- String* name = String::cast(maybe_name);
- if (name->length() > 0) return name;
- }
- }
+ // TODO(verwaest): Try to get object.constructor.name in this case.
+ // This requires handlification of the V8HeapExplorer.
}
return object->constructor_name();
}
@@ -1845,9 +1804,6 @@ class RootsReferencesExtractor : public ObjectVisitor {
void FillReferences(V8HeapExplorer* explorer) {
DCHECK(strong_references_.length() <= all_references_.length());
Builtins* builtins = heap_->isolate()->builtins();
- for (int i = 0; i < reference_tags_.length(); ++i) {
- explorer->SetGcRootsReference(reference_tags_[i].tag);
- }
int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
while (all_index < all_references_.length()) {
bool is_strong = strong_index < strong_references_.length()
@@ -1890,10 +1846,15 @@ bool V8HeapExplorer::IterateAndExtractReferences(
SnapshotFiller* filler) {
filler_ = filler;
+ // Create references to the synthetic roots.
+ SetRootGcRootsReference();
+ for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
+ SetGcRootsReference(static_cast<VisitorSynchronization::SyncTag>(tag));
+ }
+
// Make sure builtin code objects get their builtin tags
// first. Otherwise a particular JSFunction object could set
// its custom name to a generic builtin.
- SetRootGcRootsReference();
RootsReferencesExtractor extractor(heap_);
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
extractor.SetCollectingAllReferences();
@@ -2600,15 +2561,6 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
- CHECK(debug_heap->old_data_space()->swept_precisely());
- CHECK(debug_heap->old_pointer_space()->swept_precisely());
- CHECK(debug_heap->code_space()->swept_precisely());
- CHECK(debug_heap->cell_space()->swept_precisely());
- CHECK(debug_heap->property_cell_space()->swept_precisely());
- CHECK(debug_heap->map_space()->swept_precisely());
-#endif
-
-#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
@@ -2618,6 +2570,8 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
debug_heap->Verify();
#endif
+ snapshot_->AddSyntheticRootEntries();
+
if (!FillReferences()) return false;
snapshot_->FillChildren();
@@ -2658,7 +2612,6 @@ void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
bool HeapSnapshotGenerator::FillReferences() {
SnapshotFiller filler(snapshot_, &entries_);
- v8_heap_explorer_.AddRootEntries(&filler);
return v8_heap_explorer_.IterateAndExtractReferences(&filler)
&& dom_explorer_.IterateAndExtractReferences(&filler);
}
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index 1aea5a0264..3e4ce71b8a 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -100,7 +100,7 @@ class HeapEntry BASE_EMBEDDED {
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
- inline SnapshotObjectId id() { return id_; }
+ SnapshotObjectId id() { return id_; }
size_t self_size() { return self_size_; }
unsigned trace_node_id() const { return trace_node_id_; }
INLINE(int index() const);
@@ -154,7 +154,6 @@ class HeapSnapshot {
size_t RawSnapshotSize() const;
HeapEntry* root() { return &entries_[root_index_]; }
HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
HeapEntry* gc_subroot(int index) {
return &entries_[gc_subroot_indexes_[index]];
}
@@ -171,10 +170,7 @@ class HeapSnapshot {
SnapshotObjectId id,
size_t size,
unsigned trace_node_id);
- HeapEntry* AddRootEntry();
- HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag);
- HeapEntry* AddNativesRootEntry();
+ void AddSyntheticRootEntries();
HeapEntry* GetEntryById(SnapshotObjectId id);
List<HeapEntry*>* GetSortedEntriesList();
void FillChildren();
@@ -183,12 +179,15 @@ class HeapSnapshot {
void PrintEntriesSize();
private:
+ HeapEntry* AddRootEntry();
+ HeapEntry* AddGcRootsEntry();
+ HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id);
+
HeapProfiler* profiler_;
const char* title_;
unsigned uid_;
int root_index_;
int gc_roots_index_;
- int natives_root_index_;
int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
List<HeapEntry> entries_;
List<HeapGraphEdge> edges_;
@@ -223,12 +222,10 @@ class HeapObjectsMap {
size_t GetUsedMemorySize() const;
SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
- static inline SnapshotObjectId GetNthGcSubrootId(int delta);
static const int kObjectIdStep = 2;
static const SnapshotObjectId kInternalRootObjectId;
static const SnapshotObjectId kGcRootsObjectId;
- static const SnapshotObjectId kNativesRootObjectId;
static const SnapshotObjectId kGcRootsFirstSubrootId;
static const SnapshotObjectId kFirstAvailableObjectId;
@@ -348,8 +345,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
static String* GetConstructorName(JSObject* object);
- static HeapObject* const kInternalRootObject;
-
private:
typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
HeapObject* object);
@@ -378,6 +373,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractSharedFunctionInfoReferences(int entry,
SharedFunctionInfo* shared);
void ExtractScriptReferences(int entry, Script* script);
+ void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
@@ -449,9 +445,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* GetEntry(Object* obj);
- static inline HeapObject* GetNthGcSubrootObject(int delta);
- static inline int GetGcSubrootOrder(HeapObject* subroot);
-
Heap* heap_;
HeapSnapshot* snapshot_;
StringsStorage* names_;
@@ -464,12 +457,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet weak_containers_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
- static HeapObject* const kGcRootsObject;
- static HeapObject* const kFirstGcSubrootObject;
- static HeapObject* const kLastGcSubrootObject;
-
friend class IndexedReferencesExtractor;
- friend class GcSubrootsEnumerator;
friend class RootsReferencesExtractor;
DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
diff --git a/deps/v8/src/heap/gc-idle-time-handler-unittest.cc b/deps/v8/src/heap/gc-idle-time-handler-unittest.cc
new file mode 100644
index 0000000000..b4f2f74f57
--- /dev/null
+++ b/deps/v8/src/heap/gc-idle-time-handler-unittest.cc
@@ -0,0 +1,348 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class GCIdleTimeHandlerTest : public ::testing::Test {
+ public:
+ GCIdleTimeHandlerTest() {}
+ virtual ~GCIdleTimeHandlerTest() {}
+
+ GCIdleTimeHandler* handler() { return &handler_; }
+
+ GCIdleTimeHandler::HeapState DefaultHeapState() {
+ GCIdleTimeHandler::HeapState result;
+ result.contexts_disposed = 0;
+ result.size_of_objects = kSizeOfObjects;
+ result.incremental_marking_stopped = false;
+ result.can_start_incremental_marking = true;
+ result.sweeping_in_progress = false;
+ result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
+ result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
+ result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
+ result.available_new_space_memory = kNewSpaceCapacity;
+ result.new_space_capacity = kNewSpaceCapacity;
+ result.new_space_allocation_throughput_in_bytes_per_ms =
+ kNewSpaceAllocationThroughput;
+ return result;
+ }
+
+ static const size_t kSizeOfObjects = 100 * MB;
+ static const size_t kMarkCompactSpeed = 200 * KB;
+ static const size_t kMarkingSpeed = 200 * KB;
+ static const size_t kScavengeSpeed = 100 * KB;
+ static const size_t kNewSpaceCapacity = 1 * MB;
+ static const size_t kNewSpaceAllocationThroughput = 10 * KB;
+
+ private:
+ GCIdleTimeHandler handler_;
+};
+
+} // namespace
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeInitial) {
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(1, 0);
+ EXPECT_EQ(
+ static_cast<size_t>(GCIdleTimeHandler::kInitialConservativeMarkingSpeed *
+ GCIdleTimeHandler::kConservativeTimeRatio),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeNonZero) {
+ size_t marking_speed_in_bytes_per_millisecond = 100;
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ 1, marking_speed_in_bytes_per_millisecond);
+ EXPECT_EQ(static_cast<size_t>(marking_speed_in_bytes_per_millisecond *
+ GCIdleTimeHandler::kConservativeTimeRatio),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow1) {
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ 10, std::numeric_limits<size_t>::max());
+ EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ std::numeric_limits<size_t>::max(), 10);
+ EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+ step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeInitial) {
+ size_t size = 100 * MB;
+ size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, 0);
+ EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeMarkCompactSpeed,
+ time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
+ size_t size = 100 * MB;
+ size_t speed = 1 * MB;
+ size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+ EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
+ size_t size = std::numeric_limits<size_t>::max();
+ size_t speed = 1;
+ size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+ EXPECT_EQ(GCIdleTimeHandler::kMaxMarkCompactTimeInMs, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateScavengeTimeInitial) {
+ size_t size = 1 * MB;
+ size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, 0);
+ EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeScavengeSpeed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateScavengeTimeNonZero) {
+ size_t size = 1 * MB;
+ size_t speed = 1 * MB;
+ size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, speed);
+ EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonInitial) {
+ size_t available = 100 * KB;
+ EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, 0));
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroFalse) {
+ size_t available = (GCIdleTimeHandler::kMaxFrameRenderingIdleTime + 1) * KB;
+ size_t speed = 1 * KB;
+ EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroTrue) {
+ size_t available = GCIdleTimeHandler::kMaxFrameRenderingIdleTime * KB;
+ size_t speed = 1 * KB;
+ EXPECT_TRUE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ heap_state.incremental_marking_stopped = true;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms =
+ static_cast<int>((heap_state.size_of_objects + speed - 1) / speed);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ heap_state.incremental_marking_stopped = true;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.contexts_disposed = 1;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+ int idle_time_ms = 10;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+ static_cast<size_t>(action.parameter));
+ EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+ int idle_time_ms = 10;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+ static_cast<size_t>(action.parameter));
+ EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+ handler()->NotifyIdleMarkCompact();
+ }
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ // In this case we emulate incremental marking steps that finish with a
+ // full gc.
+ handler()->NotifyIdleMarkCompact();
+ }
+ heap_state.can_start_incremental_marking = false;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+ int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+ handler()->NotifyIdleMarkCompact();
+ }
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+ // Emulate mutator work.
+ for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+ handler()->NotifyScavenge();
+ }
+ action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop2) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ if (action.type == DONE) break;
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish with
+ // a full gc.
+ handler()->NotifyIdleMarkCompact();
+ }
+ heap_state.can_start_incremental_marking = false;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+ // Emulate mutator work.
+ for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+ handler()->NotifyScavenge();
+ }
+ heap_state.can_start_incremental_marking = true;
+ action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, Scavenge) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ heap_state.available_new_space_memory =
+ kNewSpaceAllocationThroughput * idle_time_ms;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_SCAVENGE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ heap_state.can_start_incremental_marking = false;
+ heap_state.incremental_marking_stopped = true;
+ heap_state.available_new_space_memory =
+ kNewSpaceAllocationThroughput * idle_time_ms;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_SCAVENGE, action.type);
+ heap_state.available_new_space_memory = kNewSpaceCapacity;
+ action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 0;
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ int idle_time_ms = 10;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ if (action.type == DONE) break;
+ EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish with
+ // a full gc.
+ handler()->NotifyIdleMarkCompact();
+ }
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ // Emulate mutator work.
+ for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+ handler()->NotifyScavenge();
+ }
+ action = handler()->Compute(0, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
new file mode 100644
index 0000000000..b9a99b2340
--- /dev/null
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
+const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
+const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
+const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
+const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
+
+
+void GCIdleTimeAction::Print() {
+ switch (type) {
+ case DONE:
+ PrintF("done");
+ break;
+ case DO_NOTHING:
+ PrintF("no action");
+ break;
+ case DO_INCREMENTAL_MARKING:
+ PrintF("incremental marking with step %" V8_PTR_PREFIX "d", parameter);
+ break;
+ case DO_SCAVENGE:
+ PrintF("scavenge");
+ break;
+ case DO_FULL_GC:
+ PrintF("full GC");
+ break;
+ case DO_FINALIZE_SWEEPING:
+ PrintF("finalize sweeping");
+ break;
+ }
+}
+
+
+size_t GCIdleTimeHandler::EstimateMarkingStepSize(
+ size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
+ DCHECK(idle_time_in_ms > 0);
+
+ if (marking_speed_in_bytes_per_ms == 0) {
+ marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
+ }
+
+ size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
+ if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
+ // In the case of an overflow we return maximum marking step size.
+ return kMaximumMarkingStepSize;
+ }
+
+ if (marking_step_size > kMaximumMarkingStepSize)
+ return kMaximumMarkingStepSize;
+
+ return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
+}
+
+
+size_t GCIdleTimeHandler::EstimateMarkCompactTime(
+ size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
+ if (mark_compact_speed_in_bytes_per_ms == 0) {
+ mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
+ }
+ size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
+ return Min(result, kMaxMarkCompactTimeInMs);
+}
+
+
+size_t GCIdleTimeHandler::EstimateScavengeTime(
+ size_t new_space_size, size_t scavenge_speed_in_bytes_per_ms) {
+ if (scavenge_speed_in_bytes_per_ms == 0) {
+ scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
+ }
+ return new_space_size / scavenge_speed_in_bytes_per_ms;
+}
+
+
+bool GCIdleTimeHandler::ScavangeMayHappenSoon(
+ size_t available_new_space_memory,
+ size_t new_space_allocation_throughput_in_bytes_per_ms) {
+ if (available_new_space_memory <=
+ new_space_allocation_throughput_in_bytes_per_ms *
+ kMaxFrameRenderingIdleTime) {
+ return true;
+ }
+ return false;
+}
+
+
+// The following logic is implemented by the controller:
+// (1) If the new space is almost full and we can afford a Scavenge, then a
+// Scavenge is performed.
+// (2) If there is currently no MarkCompact idle round going on, we start a
+// new idle round if enough garbage was created or we received a context
+// disposal event. Otherwise we do not perform garbage collection to keep
+// system utilization low.
+// (3) If incremental marking is done, we perform a full garbage collection
+// if context was disposed or if we are allowed to still do full garbage
+// collections during this idle round or if we are not allowed to start
+// incremental marking. Otherwise we do not perform garbage collection to
+// keep system utilization low.
+// (4) If sweeping is in progress and we received a large enough idle time
+// request, we finalize sweeping here.
+// (5) If incremental marking is in progress, we perform a marking step. Note,
+// that this currently may trigger a full garbage collection.
+GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
+ HeapState heap_state) {
+ if (idle_time_in_ms <= kMaxFrameRenderingIdleTime &&
+ ScavangeMayHappenSoon(
+ heap_state.available_new_space_memory,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms) &&
+ idle_time_in_ms >=
+ EstimateScavengeTime(heap_state.new_space_capacity,
+ heap_state.scavenge_speed_in_bytes_per_ms)) {
+ return GCIdleTimeAction::Scavenge();
+ }
+ if (IsMarkCompactIdleRoundFinished()) {
+ if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
+ StartIdleRound();
+ } else {
+ return GCIdleTimeAction::Done();
+ }
+ }
+
+ if (idle_time_in_ms == 0) {
+ return GCIdleTimeAction::Nothing();
+ }
+
+ if (heap_state.incremental_marking_stopped) {
+ size_t estimated_time_in_ms =
+ EstimateMarkCompactTime(heap_state.size_of_objects,
+ heap_state.mark_compact_speed_in_bytes_per_ms);
+ if (idle_time_in_ms >= estimated_time_in_ms ||
+ (heap_state.size_of_objects < kSmallHeapSize &&
+ heap_state.contexts_disposed > 0)) {
+ // If there are no more than two GCs left in this idle round and we are
+ // allowed to do a full GC, then make those GCs full in order to compact
+ // the code space.
+ // TODO(ulan): Once we enable code compaction for incremental marking, we
+ // can get rid of this special case and always start incremental marking.
+ int remaining_mark_sweeps =
+ kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
+ if (heap_state.contexts_disposed > 0 ||
+ (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
+ (remaining_mark_sweeps <= 2 ||
+ !heap_state.can_start_incremental_marking))) {
+ return GCIdleTimeAction::FullGC();
+ }
+ }
+ if (!heap_state.can_start_incremental_marking) {
+ return GCIdleTimeAction::Nothing();
+ }
+ }
+ // TODO(hpayer): Estimate finalize sweeping time.
+ if (heap_state.sweeping_in_progress &&
+ idle_time_in_ms >= kMinTimeForFinalizeSweeping) {
+ return GCIdleTimeAction::FinalizeSweeping();
+ }
+
+ if (heap_state.incremental_marking_stopped &&
+ !heap_state.can_start_incremental_marking) {
+ return GCIdleTimeAction::Nothing();
+ }
+ size_t step_size = EstimateMarkingStepSize(
+ idle_time_in_ms, heap_state.incremental_marking_speed_in_bytes_per_ms);
+ return GCIdleTimeAction::IncrementalMarking(step_size);
+}
+}
+}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
new file mode 100644
index 0000000000..daab616d61
--- /dev/null
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -0,0 +1,188 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_GC_IDLE_TIME_HANDLER_H_
+#define V8_HEAP_GC_IDLE_TIME_HANDLER_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+enum GCIdleTimeActionType {
+ DONE,
+ DO_NOTHING,
+ DO_INCREMENTAL_MARKING,
+ DO_SCAVENGE,
+ DO_FULL_GC,
+ DO_FINALIZE_SWEEPING
+};
+
+
+class GCIdleTimeAction {
+ public:
+ static GCIdleTimeAction Done() {
+ GCIdleTimeAction result;
+ result.type = DONE;
+ result.parameter = 0;
+ return result;
+ }
+
+ static GCIdleTimeAction Nothing() {
+ GCIdleTimeAction result;
+ result.type = DO_NOTHING;
+ result.parameter = 0;
+ return result;
+ }
+
+ static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
+ GCIdleTimeAction result;
+ result.type = DO_INCREMENTAL_MARKING;
+ result.parameter = step_size;
+ return result;
+ }
+
+ static GCIdleTimeAction Scavenge() {
+ GCIdleTimeAction result;
+ result.type = DO_SCAVENGE;
+ result.parameter = 0;
+ return result;
+ }
+
+ static GCIdleTimeAction FullGC() {
+ GCIdleTimeAction result;
+ result.type = DO_FULL_GC;
+ result.parameter = 0;
+ return result;
+ }
+
+ static GCIdleTimeAction FinalizeSweeping() {
+ GCIdleTimeAction result;
+ result.type = DO_FINALIZE_SWEEPING;
+ result.parameter = 0;
+ return result;
+ }
+
+ void Print();
+
+ GCIdleTimeActionType type;
+ intptr_t parameter;
+};
+
+
+class GCTracer;
+
+// The idle time handler makes decisions about which garbage collection
+// operations are executing during IdleNotification.
+class GCIdleTimeHandler {
+ public:
+ // If we haven't recorded any incremental marking events yet, we carefully
+ // mark with a conservative lower bound for the marking speed.
+ static const size_t kInitialConservativeMarkingSpeed = 100 * KB;
+
+ // Maximum marking step size returned by EstimateMarkingStepSize.
+ static const size_t kMaximumMarkingStepSize = 700 * MB;
+
+ // We have to make sure that we finish the IdleNotification before
+ // idle_time_in_ms. Hence, we conservatively prune our workload estimate.
+ static const double kConservativeTimeRatio;
+
+ // If we haven't recorded any mark-compact events yet, we use
+ // a conservative lower bound for the mark-compact speed.
+ static const size_t kInitialConservativeMarkCompactSpeed = 2 * MB;
+
+ // Maximum mark-compact time returned by EstimateMarkCompactTime.
+ static const size_t kMaxMarkCompactTimeInMs;
+
+ // Minimum time to finalize sweeping phase. The main thread may wait for
+ // sweeper threads.
+ static const size_t kMinTimeForFinalizeSweeping;
+
+ // Number of idle mark-compact events, after which idle handler will finish
+ // idle round.
+ static const int kMaxMarkCompactsInIdleRound;
+
+ // Number of scavenges that will trigger start of new idle round.
+ static const int kIdleScavengeThreshold;
+
+ // Heap size threshold below which we prefer mark-compact over incremental
+ // step.
+ static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
+
+ // This is the maximum idle time we will have during frame rendering.
+ static const size_t kMaxFrameRenderingIdleTime = 16;
+
+ // If less than that much memory is left in the new space, we consider it
+ // as almost full and force a new space collection earlier in the idle time.
+ static const size_t kNewSpaceAlmostFullTreshold = 100 * KB;
+
+ // If we haven't recorded any scavenger events yet, we use a conservative
+ // lower bound for the scavenger speed.
+ static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
+
+ struct HeapState {
+ int contexts_disposed;
+ size_t size_of_objects;
+ bool incremental_marking_stopped;
+ bool can_start_incremental_marking;
+ bool sweeping_in_progress;
+ size_t mark_compact_speed_in_bytes_per_ms;
+ size_t incremental_marking_speed_in_bytes_per_ms;
+ size_t scavenge_speed_in_bytes_per_ms;
+ size_t available_new_space_memory;
+ size_t new_space_capacity;
+ size_t new_space_allocation_throughput_in_bytes_per_ms;
+ };
+
+ GCIdleTimeHandler()
+ : mark_compacts_since_idle_round_started_(0),
+ scavenges_since_last_idle_round_(0) {}
+
+ GCIdleTimeAction Compute(size_t idle_time_in_ms, HeapState heap_state);
+
+ void NotifyIdleMarkCompact() {
+ if (mark_compacts_since_idle_round_started_ < kMaxMarkCompactsInIdleRound) {
+ ++mark_compacts_since_idle_round_started_;
+ if (mark_compacts_since_idle_round_started_ ==
+ kMaxMarkCompactsInIdleRound) {
+ scavenges_since_last_idle_round_ = 0;
+ }
+ }
+ }
+
+ void NotifyScavenge() { ++scavenges_since_last_idle_round_; }
+
+ static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
+ size_t marking_speed_in_bytes_per_ms);
+
+ static size_t EstimateMarkCompactTime(
+ size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+
+ static size_t EstimateScavengeTime(size_t new_space_size,
+ size_t scavenger_speed_in_bytes_per_ms);
+
+ static bool ScavangeMayHappenSoon(
+ size_t available_new_space_memory,
+ size_t new_space_allocation_throughput_in_bytes_per_ms);
+
+ private:
+ void StartIdleRound() { mark_compacts_since_idle_round_started_ = 0; }
+ bool IsMarkCompactIdleRoundFinished() {
+ return mark_compacts_since_idle_round_started_ ==
+ kMaxMarkCompactsInIdleRound;
+ }
+ bool EnoughGarbageSinceLastIdleRound() {
+ return scavenges_since_last_idle_round_ >= kIdleScavengeThreshold;
+ }
+
+ int mark_compacts_since_idle_round_started_;
+ int scavenges_since_last_idle_round_;
+
+ DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_GC_IDLE_TIME_HANDLER_H_
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 12de0e457e..8a40b53a62 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -19,6 +19,13 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
}
+GCTracer::AllocationEvent::AllocationEvent(double duration,
+ intptr_t allocation_in_bytes) {
+ duration_ = duration;
+ allocation_in_bytes_ = allocation_in_bytes;
+}
+
+
GCTracer::Event::Event(Type type, const char* gc_reason,
const char* collector_reason)
: type(type),
@@ -80,7 +87,8 @@ GCTracer::GCTracer(Heap* heap)
cumulative_pure_incremental_marking_duration_(0.0),
longest_incremental_marking_step_(0.0),
cumulative_marking_duration_(0.0),
- cumulative_sweeping_duration_(0.0) {
+ cumulative_sweeping_duration_(0.0),
+ new_space_top_after_gc_(0) {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis();
previous_ = previous_mark_compactor_event_ = current_;
@@ -90,6 +98,13 @@ GCTracer::GCTracer(Heap* heap)
void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
const char* collector_reason) {
previous_ = current_;
+ double start_time = base::OS::TimeCurrentMillis();
+ if (new_space_top_after_gc_ != 0) {
+ AddNewSpaceAllocationTime(
+ start_time - previous_.end_time,
+ reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
+ new_space_top_after_gc_));
+ }
if (current_.type == Event::MARK_COMPACTOR)
previous_mark_compactor_event_ = current_;
@@ -99,10 +114,12 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
}
- current_.start_time = base::OS::TimeCurrentMillis();
+ current_.start_time = start_time;
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
+ current_.new_space_object_size =
+ heap_->new_space()->top() - heap_->new_space()->bottom();
current_.cumulative_incremental_marking_steps =
cumulative_incremental_marking_steps_;
@@ -125,6 +142,8 @@ void GCTracer::Stop() {
current_.end_object_size = heap_->SizeOfObjects();
current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
current_.end_holes_size = CountTotalHolesSize(heap_);
+ new_space_top_after_gc_ =
+ reinterpret_cast<intptr_t>(heap_->new_space()->top());
if (current_.type == Event::SCAVENGER) {
current_.incremental_marking_steps =
@@ -182,6 +201,12 @@ void GCTracer::Stop() {
}
+void GCTracer::AddNewSpaceAllocationTime(double duration,
+ intptr_t allocation_in_bytes) {
+ allocation_events_.push_front(AllocationEvent(duration, allocation_in_bytes));
+}
+
+
void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
cumulative_incremental_marking_steps_++;
cumulative_incremental_marking_bytes_ += bytes;
@@ -292,10 +317,14 @@ void GCTracer::PrintNVP() const {
PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
+ PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
+ NewSpaceAllocationThroughputInBytesPerMillisecond());
if (current_.type == Event::SCAVENGER) {
PrintF("steps_count=%d ", current_.incremental_marking_steps);
PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+ PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
+ ScavengeSpeedInBytesPerMillisecond());
} else {
PrintF("steps_count=%d ", current_.incremental_marking_steps);
PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
@@ -398,5 +427,54 @@ intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
return static_cast<intptr_t>(bytes / durations);
}
+
+
+intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ EventBuffer::const_iterator iter = scavenger_events_.begin();
+ while (iter != scavenger_events_.end()) {
+ bytes += iter->new_space_object_size;
+ durations += iter->end_time - iter->start_time;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
+
+ return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+ while (iter != mark_compactor_events_.end()) {
+ bytes += iter->start_object_size;
+ durations += iter->end_time - iter->start_time +
+ iter->pure_incremental_marking_duration;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
+
+ return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
+ while (iter != allocation_events_.end()) {
+ bytes += iter->allocation_in_bytes_;
+ durations += iter->duration_;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
+
+ return static_cast<intptr_t>(bytes / durations);
+}
}
} // namespace v8::internal
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 14281a4c8d..4e70f0741c 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_GC_TRACER_H_
#define V8_HEAP_GC_TRACER_H_
+#include "src/base/platform/platform.h"
+
namespace v8 {
namespace internal {
@@ -81,9 +83,9 @@ class RingBuffer {
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
// TODO(ernstm): Unit tests.
-class GCTracer BASE_EMBEDDED {
+class GCTracer {
public:
- class Scope BASE_EMBEDDED {
+ class Scope {
public:
enum ScopeId {
EXTERNAL,
@@ -127,6 +129,22 @@ class GCTracer BASE_EMBEDDED {
};
+ class AllocationEvent {
+ public:
+ // Default constructor leaves the event uninitialized.
+ AllocationEvent() {}
+
+ AllocationEvent(double duration, intptr_t allocation_in_bytes);
+
+ // Time spent in the mutator from the end of the last garbage collection
+ // to the beginning of the next garbage collection.
+ double duration_;
+
+ // Memory allocated in the new space from the end of the last garbage
+ // collection to the beginning of the next garbage collection.
+ intptr_t allocation_in_bytes_;
+ };
+
class Event {
public:
enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
@@ -171,6 +189,9 @@ class GCTracer BASE_EMBEDDED {
// after the current GC.
intptr_t end_holes_size;
+ // Size of new space objects in constructor.
+ intptr_t new_space_object_size;
+
// Number of incremental marking steps since creation of tracer.
// (value at start of event)
int cumulative_incremental_marking_steps;
@@ -218,6 +239,8 @@ class GCTracer BASE_EMBEDDED {
typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
+ typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
+
explicit GCTracer(Heap* heap);
// Start collecting data.
@@ -227,6 +250,9 @@ class GCTracer BASE_EMBEDDED {
// Stop collecting data and print results.
void Stop();
+ // Log an allocation throughput event.
+ void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
+
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
@@ -280,10 +306,22 @@ class GCTracer BASE_EMBEDDED {
// Returns 0 if no incremental marking round has been completed.
double MaxIncrementalMarkingDuration() const;
- // Compute the average incremental marking speed in bytes/second. Returns 0 if
- // no events have been recorded.
+ // Compute the average incremental marking speed in bytes/millisecond.
+ // Returns 0 if no events have been recorded.
intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
+ // Compute the average scavenge speed in bytes/millisecond.
+ // Returns 0 if no events have been recorded.
+ intptr_t ScavengeSpeedInBytesPerMillisecond() const;
+
+ // Compute the average mark-compact speed in bytes/millisecond.
+ // Returns 0 if no events have been recorded.
+ intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+
+ // Allocation throughput in the new space in bytes/millisecond.
+ // Returns 0 if no events have been recorded.
+ intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+
private:
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -318,6 +356,9 @@ class GCTracer BASE_EMBEDDED {
// RingBuffers for MARK_COMPACTOR events.
EventBuffer mark_compactor_events_;
+ // RingBuffer for allocation events.
+ AllocationEventBuffer allocation_events_;
+
// Cumulative number of incremental marking steps since creation of tracer.
int cumulative_incremental_marking_steps_;
@@ -348,6 +389,10 @@ class GCTracer BASE_EMBEDDED {
// all sweeping operations performed on the main thread.
double cumulative_sweeping_duration_;
+ // Holds the new space top pointer recorded at the end of the last garbage
+ // collection.
+ intptr_t new_space_top_after_gc_;
+
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
}
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index adb6e25bb7..e658224aca 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -15,6 +15,7 @@
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
+#include "src/msan.h"
#include "src/objects.h"
namespace v8 {
@@ -31,18 +32,12 @@ void PromotionQueue::insert(HeapObject* target, int size) {
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
DCHECK(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
- ActivateGuardIfOnTheSamePage();
}
- if (guard_) {
- DCHECK(GetHeadPage() ==
- Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
-
- if ((rear_ - 2) < limit_) {
- RelocateQueueHead();
- emergency_stack_->Add(Entry(target, size));
- return;
- }
+ if ((rear_ - 2) < limit_) {
+ RelocateQueueHead();
+ emergency_stack_->Add(Entry(target, size));
+ return;
}
*(--rear_) = reinterpret_cast<intptr_t>(target);
@@ -55,17 +50,9 @@ void PromotionQueue::insert(HeapObject* target, int size) {
}
-void PromotionQueue::ActivateGuardIfOnTheSamePage() {
- guard_ = guard_ ||
- heap_->new_space()->active_space()->current_page()->address() ==
- GetHeadPage()->address();
-}
-
-
template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
// TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
- // ASCII only check.
return chars == str.length();
}
@@ -100,7 +87,7 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
Vector<const uint8_t> str, uint32_t hash_field) {
CHECK_GE(String::kMaxLength, str.length());
// Compute map and object size.
- Map* map = ascii_internalized_string_map();
+ Map* map = one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
@@ -461,8 +448,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == src || dst == TargetSpaceId(type);
case OLD_POINTER_SPACE:
return dst == src && (dst == TargetSpaceId(type) || obj->IsFiller() ||
- (obj->IsExternalString() &&
- ExternalString::cast(obj)->is_short()));
+ obj->IsExternalString());
case OLD_DATA_SPACE:
return dst == src && dst == TargetSpaceId(type);
case CODE_SPACE:
@@ -510,7 +496,7 @@ void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
// Check if there is potentially a memento behind the object. If
- // the last word of the momento is on another page we return
+ // the last word of the memento is on another page we return
// immediately.
Address object_address = object->address();
Address memento_address = object_address + object->Size();
@@ -520,7 +506,12 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
}
HeapObject* candidate = HeapObject::FromAddress(memento_address);
- if (candidate->map() != allocation_memento_map()) return NULL;
+ Map* candidate_map = candidate->map();
+ // This fast check may peek at an uninitialized word. However, the slow check
+ // below (memento_address == top) ensures that this is safe. Mark the word as
+ // initialized to silence MemorySanitizer warnings.
+ MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
+ if (candidate_map != allocation_memento_map()) return NULL;
// Either the object is the last object in the new space, or there is another
// object of at least word size (the header map word) following it, so
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index fd08c8292f..78c45b0e22 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
@@ -16,6 +17,7 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
@@ -119,12 +121,7 @@ Heap::Heap()
store_buffer_(this),
marking_(this),
incremental_marking_(this),
- number_idle_notifications_(0),
- last_idle_notification_gc_count_(0),
- last_idle_notification_gc_count_init_(false),
- mark_sweeps_since_idle_round_started_(0),
gc_count_at_last_idle_gc_(0),
- scavenges_since_last_idle_round_(kIdleScavengeThreshold),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
gcs_since_last_deopt_(0),
@@ -849,8 +846,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
if (!mark_compact_collector()->abort_incremental_marking() &&
- incremental_marking()->IsStopped() &&
- incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+ WorthActivatingIncrementalMarking()) {
incremental_marking()->Start();
}
@@ -927,9 +923,12 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
- DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
- for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
- if (sizes[space] != 0) {
+ for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
+ if (sizes[space] == 0) continue;
+ bool perform_gc = false;
+ if (space == LO_SPACE) {
+ perform_gc = !lo_space()->CanAllocateSize(sizes[space]);
+ } else {
AllocationResult allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
@@ -937,24 +936,28 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
- if (!allocation.To(&node)) {
- if (space == NEW_SPACE) {
- Heap::CollectGarbage(NEW_SPACE,
- "failed to reserve space in the new space");
- } else {
- AbortIncrementalMarkingAndCollectGarbage(
- this, static_cast<AllocationSpace>(space),
- "failed to reserve space in paged space");
- }
- gc_performed = true;
- break;
- } else {
+ if (allocation.To(&node)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
+ DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
locations_out[space] = node->address();
+ } else {
+ perform_gc = true;
}
}
+ if (perform_gc) {
+ if (space == NEW_SPACE) {
+ Heap::CollectGarbage(NEW_SPACE,
+ "failed to reserve space in the new space");
+ } else {
+ AbortIncrementalMarkingAndCollectGarbage(
+ this, static_cast<AllocationSpace>(space),
+ "failed to reserve space in paged or large object space");
+ }
+ gc_performed = true;
+ break; // Abort for-loop over spaces and retry.
+ }
}
}
@@ -1277,21 +1280,17 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
object = code_it.Next())
object->Iterate(&v);
- // The old data space was normally swept conservatively so that the iterator
- // doesn't work, so we normally skip the next bit.
- if (heap->old_data_space()->swept_precisely()) {
HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next(); object != NULL;
object = data_it.Next())
object->Iterate(&v);
- }
}
#endif // VERIFY_HEAP
void Heap::CheckNewSpaceExpansionCriteria() {
- if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.Capacity()) {
+ if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
+ survived_since_last_expansion_ > new_space_.TotalCapacity()) {
// Grow the size of new space if there is room to grow, enough data
// has survived scavenge since the last expansion and we are not in
// high promotion mode.
@@ -1373,7 +1372,6 @@ void PromotionQueue::Initialize() {
front_ = rear_ =
reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
emergency_stack_ = NULL;
- guard_ = false;
}
@@ -1557,8 +1555,6 @@ void Heap::Scavenge() {
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
-
- scavenges_since_last_idle_round_++;
}
@@ -1971,15 +1967,16 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
+ // Order is important here: Set the promotion limit before storing a
+ // filler for double alignment or migrating the object. Otherwise we
+ // may end up overwriting promotion queue entries when we migrate the
+ // object.
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
- // Order is important here: Set the promotion limit before migrating
- // the object. Otherwise we may end up overwriting promotion queue
- // entries when we migrate the object.
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
@@ -2072,7 +2069,10 @@ class ScavengingVisitor : public StaticVisitorBase {
ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
JSFunction::kSize>(map, slot, object);
- HeapObject* target = *slot;
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ HeapObject* target = map_word.ToForwardingAddress();
+
MarkBit mark_bit = Marking::MarkBitFrom(target);
if (Marking::IsBlack(mark_bit)) {
// This object is black and it might not be rescanned by marker.
@@ -2509,7 +2509,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
- for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
+ for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
{
AllocationResult allocation = AllocateMap(entry.type, entry.size);
@@ -2525,8 +2525,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
undetectable_string_map()->set_is_undetectable();
- ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
- undetectable_ascii_string_map()->set_is_undetectable();
+ ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
+ undetectable_one_byte_string_map()->set_is_undetectable();
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
@@ -2555,7 +2555,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
- for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
+ for (unsigned i = 0; i < arraysize(struct_table); i++) {
const StructTable& entry = struct_table[i];
Map* map;
if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
@@ -2699,13 +2699,13 @@ void Heap::CreateApiObjects() {
void Heap::CreateJSEntryStub() {
- JSEntryStub stub(isolate());
+ JSEntryStub stub(isolate(), StackFrame::ENTRY);
set_js_entry_code(*stub.GetCode());
}
void Heap::CreateJSConstructEntryStub() {
- JSConstructEntryStub stub(isolate());
+ JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
set_js_construct_entry_code(*stub.GetCode());
}
@@ -2800,7 +2800,7 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(-5), isolate()),
Oddball::kException));
- for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
+ for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
Handle<String> str =
factory->InternalizeUtf8String(constant_string_table[i].contents);
roots_[constant_string_table[i].index] = *str;
@@ -2868,15 +2868,18 @@ void Heap::CreateInitialObjects() {
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
- set_detailed_stack_trace_symbol(*factory->NewPrivateSymbol());
- set_elements_transition_symbol(*factory->NewPrivateSymbol());
- set_frozen_symbol(*factory->NewPrivateSymbol());
- set_megamorphic_symbol(*factory->NewPrivateSymbol());
- set_nonexistent_symbol(*factory->NewPrivateSymbol());
- set_normal_ic_symbol(*factory->NewPrivateSymbol());
- set_observed_symbol(*factory->NewPrivateSymbol());
- set_stack_trace_symbol(*factory->NewPrivateSymbol());
- set_uninitialized_symbol(*factory->NewPrivateSymbol());
+ set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
+ set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
+ set_frozen_symbol(*factory->NewPrivateOwnSymbol());
+ set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
+ set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
+ set_generic_symbol(*factory->NewPrivateOwnSymbol());
+ set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
+ set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
+ set_observed_symbol(*factory->NewPrivateOwnSymbol());
+ set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
+ set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
+ set_home_object_symbol(*factory->NewPrivateOwnSymbol());
Handle<SeededNumberDictionary> slow_element_dictionary =
SeededNumberDictionary::New(isolate(), 0, TENURED);
@@ -2927,7 +2930,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
kStringTableRootIndex,
};
- for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
+ for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
if (root_index == writable_roots[i]) return true;
}
return false;
@@ -3321,7 +3324,6 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
const int bytes_to_trim = elements_to_trim * element_size;
// For now this trick is only applied to objects in new and paged space.
- DCHECK(!lo_space()->Contains(object));
DCHECK(object->map() != fixed_cow_array_map());
const int len = object->length();
@@ -3333,7 +3335,12 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- CreateFillerObjectAt(new_end, bytes_to_trim);
+ // We do not create a filler for objects in large object space.
+ // TODO(hpayer): We should shrink the large object page if the size
+ // of the object changed significantly.
+ if (!lo_space()->Contains(object)) {
+ CreateFillerObjectAt(new_end, bytes_to_trim);
+ }
// Initialize header of the trimmed array. We are storing the new length
// using release store after creating a filler for the left-over space to
@@ -3763,7 +3770,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
int len) {
- // Only works for ascii.
+ // Only works for one byte strings.
DCHECK(vector.length() == len);
MemCopy(chars, vector.start(), len);
}
@@ -3818,7 +3825,7 @@ AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
DCHECK_LE(0, chars);
DCHECK_GE(String::kMaxLength, chars);
if (is_one_byte) {
- map = ascii_internalized_string_map();
+ map = one_byte_internalized_string_map();
size = SeqOneByteString::SizeFor(chars);
} else {
map = internalized_string_map();
@@ -3876,7 +3883,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
}
// Partially initialize the object.
- result->set_map_no_write_barrier(ascii_string_map());
+ result->set_map_no_write_barrier(one_byte_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
DCHECK_EQ(size, HeapObject::cast(result)->Size());
@@ -4241,9 +4248,7 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
bool Heap::IsHeapIterable() {
// TODO(hpayer): This function is not correct. Allocation folding in old
// space breaks the iterability.
- return (old_pointer_space()->swept_precisely() &&
- old_data_space()->swept_precisely() &&
- new_space_top_after_last_gc_ == new_space()->top());
+ return new_space_top_after_last_gc_ == new_space()->top();
}
@@ -4272,7 +4277,7 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
}
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: finalize incremental");
- mark_sweeps_since_idle_round_started_++;
+ gc_idle_time_handler_.NotifyIdleMarkCompact();
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
new_space_.Shrink();
@@ -4282,96 +4287,96 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
}
-bool Heap::IdleNotification(int hint) {
+bool Heap::WorthActivatingIncrementalMarking() {
+ return incremental_marking()->IsStopped() &&
+ incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+}
+
+
+bool Heap::IdleNotification(int idle_time_in_ms) {
// If incremental marking is off, we do not perform idle notification.
if (!FLAG_incremental_marking) return true;
-
- // Hints greater than this value indicate that
- // the embedder is requesting a lot of GC work.
- const int kMaxHint = 1000;
- const int kMinHintForIncrementalMarking = 10;
- // Minimal hint that allows to do full GC.
- const int kMinHintForFullGC = 100;
- intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
- // The size factor is in range [5..250]. The numbers here are chosen from
- // experiments. If you changes them, make sure to test with
- // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
- intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
-
- isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint);
+ base::ElapsedTimer timer;
+ timer.Start();
+ isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
+ idle_time_in_ms);
HistogramTimerScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
- if (contexts_disposed_ > 0) {
- contexts_disposed_ = 0;
- int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
- if (hint >= mark_sweep_time && !FLAG_expose_gc &&
- incremental_marking()->IsStopped()) {
+ GCIdleTimeHandler::HeapState heap_state;
+ heap_state.contexts_disposed = contexts_disposed_;
+ heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
+ heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
+ // TODO(ulan): Start incremental marking only for large heaps.
+ heap_state.can_start_incremental_marking =
+ incremental_marking()->ShouldActivate();
+ heap_state.sweeping_in_progress =
+ mark_compact_collector()->sweeping_in_progress();
+ heap_state.mark_compact_speed_in_bytes_per_ms =
+ static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
+ heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
+ tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ heap_state.scavenge_speed_in_bytes_per_ms =
+ static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
+ heap_state.available_new_space_memory = new_space_.Available();
+ heap_state.new_space_capacity = new_space_.Capacity();
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms =
+ static_cast<size_t>(
+ tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+
+ GCIdleTimeAction action =
+ gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
+
+ bool result = false;
+ switch (action.type) {
+ case DONE:
+ result = true;
+ break;
+ case DO_INCREMENTAL_MARKING:
+ if (incremental_marking()->IsStopped()) {
+ incremental_marking()->Start();
+ }
+ AdvanceIdleIncrementalMarking(action.parameter);
+ break;
+ case DO_FULL_GC: {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: contexts disposed");
- } else {
- AdvanceIdleIncrementalMarking(step_size);
- }
-
- // After context disposal there is likely a lot of garbage remaining, reset
- // the idle notification counters in order to trigger more incremental GCs
- // on subsequent idle notifications.
- StartIdleRound();
- return false;
- }
-
- // By doing small chunks of GC work in each IdleNotification,
- // perform a round of incremental GCs and after that wait until
- // the mutator creates enough garbage to justify a new round.
- // An incremental GC progresses as follows:
- // 1. many incremental marking steps,
- // 2. one old space mark-sweep-compact,
- // Use mark-sweep-compact events to count incremental GCs in a round.
-
- if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
- if (EnoughGarbageSinceLastIdleRound()) {
- StartIdleRound();
- } else {
- return true;
+ const char* message = contexts_disposed_
+ ? "idle notification: contexts disposed"
+ : "idle notification: finalize idle round";
+ CollectAllGarbage(kReduceMemoryFootprintMask, message);
+ gc_idle_time_handler_.NotifyIdleMarkCompact();
+ break;
}
+ case DO_SCAVENGE:
+ CollectGarbage(NEW_SPACE, "idle notification: scavenge");
+ break;
+ case DO_FINALIZE_SWEEPING:
+ mark_compact_collector()->EnsureSweepingCompleted();
+ break;
+ case DO_NOTHING:
+ break;
}
- int remaining_mark_sweeps =
- kMaxMarkSweepsInIdleRound - mark_sweeps_since_idle_round_started_;
-
- if (incremental_marking()->IsStopped()) {
- // If there are no more than two GCs left in this idle round and we are
- // allowed to do a full GC, then make those GCs full in order to compact
- // the code space.
- // TODO(ulan): Once we enable code compaction for incremental marking,
- // we can get rid of this special case and always start incremental marking.
- if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: finalize idle round");
- mark_sweeps_since_idle_round_started_++;
- } else if (hint > kMinHintForIncrementalMarking) {
- incremental_marking()->Start();
+ int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+ if (actual_time_ms <= idle_time_in_ms) {
+ if (action.type != DONE && action.type != DO_NOTHING) {
+ isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
+ idle_time_in_ms - actual_time_ms);
}
- }
- if (!incremental_marking()->IsStopped() &&
- hint > kMinHintForIncrementalMarking) {
- AdvanceIdleIncrementalMarking(step_size);
- }
-
- if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
- FinishIdleRound();
- return true;
+ } else {
+ isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
+ actual_time_ms - idle_time_in_ms);
}
- // If the IdleNotifcation is called with a large hint we will wait for
- // the sweepter threads here.
- if (hint >= kMinHintForFullGC &&
- mark_compact_collector()->sweeping_in_progress()) {
- mark_compact_collector()->EnsureSweepingCompleted();
+ if (FLAG_trace_idle_notification) {
+ PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
+ idle_time_in_ms, actual_time_ms);
+ action.Print();
+ PrintF("]\n");
}
- return false;
+ contexts_disposed_ = 0;
+ return result;
}
@@ -4743,7 +4748,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
- v->VisitPointer(BitCast<Object**>(&hidden_string_));
+ v->VisitPointer(bit_cast<Object**>(&hidden_string_));
v->Synchronize(VisitorSynchronization::kInternalizedString);
isolate_->bootstrapper()->Iterate(v);
@@ -4877,8 +4882,10 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// The new space size must be a power of two to support single-bit testing
// for containment.
- max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
- reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
+ max_semi_space_size_ =
+ base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+ reserved_semispace_size_ =
+ base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
if (FLAG_min_semi_space_size > 0) {
int initial_semispace_size = FLAG_min_semi_space_size * MB;
diff --git a/deps/v8/test/compiler-unittests/compiler-unittests.gyp b/deps/v8/src/heap/heap.gyp
index c1de0c4235..2970eb8a43 100644
--- a/deps/v8/test/compiler-unittests/compiler-unittests.gyp
+++ b/deps/v8/src/heap/heap.gyp
@@ -9,31 +9,22 @@
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
{
- 'target_name': 'compiler-unittests',
+ 'target_name': 'heap-unittests',
'type': 'executable',
'dependencies': [
- '../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
+ '../../testing/gtest.gyp:gtest_main',
'../../tools/gyp/v8.gyp:v8_libplatform',
],
'include_dirs': [
'../..',
],
'sources': [ ### gcmole(all) ###
- 'change-lowering-unittest.cc',
- 'compiler-unittests.cc',
- 'instruction-selector-unittest.cc',
- 'node-matchers.cc',
- 'node-matchers.h',
+ 'gc-idle-time-handler-unittest.cc',
],
'conditions': [
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'arm/instruction-selector-arm-unittest.cc',
- ],
- }],
['component=="shared_library"', {
- # compiler-unittests can't be built against a shared library, so we
+ # heap-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
'conditions': [
['v8_use_snapshot=="true"', {
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index c313333362..87b939ad6c 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -11,6 +11,7 @@
#include "src/assert-scope.h"
#include "src/counters.h"
#include "src/globals.h"
+#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
@@ -73,34 +74,34 @@ namespace internal {
V(Smi, hash_seed, HashSeed) \
V(Map, symbol_map, SymbolMap) \
V(Map, string_map, StringMap) \
- V(Map, ascii_string_map, AsciiStringMap) \
+ V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, cons_string_map, ConsStringMap) \
- V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
+ V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
- V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
+ V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
V(Map, external_string_map, ExternalStringMap) \
V(Map, external_string_with_one_byte_data_map, \
ExternalStringWithOneByteDataMap) \
- V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
+ V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
V(Map, short_external_string_map, ShortExternalStringMap) \
V(Map, short_external_string_with_one_byte_data_map, \
ShortExternalStringWithOneByteDataMap) \
V(Map, internalized_string_map, InternalizedStringMap) \
- V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
+ V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
V(Map, external_internalized_string_with_one_byte_data_map, \
ExternalInternalizedStringWithOneByteDataMap) \
- V(Map, external_ascii_internalized_string_map, \
- ExternalAsciiInternalizedStringMap) \
+ V(Map, external_one_byte_internalized_string_map, \
+ ExternalOneByteInternalizedStringMap) \
V(Map, short_external_internalized_string_map, \
ShortExternalInternalizedStringMap) \
V(Map, short_external_internalized_string_with_one_byte_data_map, \
ShortExternalInternalizedStringWithOneByteDataMap) \
- V(Map, short_external_ascii_internalized_string_map, \
- ShortExternalAsciiInternalizedStringMap) \
- V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
+ V(Map, short_external_one_byte_internalized_string_map, \
+ ShortExternalOneByteInternalizedStringMap) \
+ V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
- V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
+ V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap) \
V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
@@ -182,9 +183,12 @@ namespace internal {
V(Symbol, observed_symbol, ObservedSymbol) \
V(Symbol, uninitialized_symbol, UninitializedSymbol) \
V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
+ V(Symbol, premonomorphic_symbol, PremonomorphicSymbol) \
+ V(Symbol, generic_symbol, GenericSymbol) \
V(Symbol, stack_trace_symbol, StackTraceSymbol) \
V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol) \
V(Symbol, normal_ic_symbol, NormalICSymbol) \
+ V(Symbol, home_object_symbol, HomeObjectSymbol) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(FixedArray, microtask_queue, MicrotaskQueue)
@@ -256,13 +260,10 @@ namespace internal {
V(neander_map)
#define INTERNALIZED_STRING_LIST(V) \
- V(Array_string, "Array") \
V(Object_string, "Object") \
V(proto_string, "__proto__") \
V(arguments_string, "arguments") \
V(Arguments_string, "Arguments") \
- V(call_string, "call") \
- V(apply_string, "apply") \
V(caller_string, "caller") \
V(boolean_string, "boolean") \
V(Boolean_string, "Boolean") \
@@ -273,59 +274,51 @@ namespace internal {
V(eval_string, "eval") \
V(empty_string, "") \
V(function_string, "function") \
+ V(Function_string, "Function") \
V(length_string, "length") \
V(name_string, "name") \
V(null_string, "null") \
V(number_string, "number") \
V(Number_string, "Number") \
V(nan_string, "NaN") \
- V(RegExp_string, "RegExp") \
V(source_string, "source") \
V(source_url_string, "source_url") \
V(source_mapping_url_string, "source_mapping_url") \
V(global_string, "global") \
V(ignore_case_string, "ignoreCase") \
V(multiline_string, "multiline") \
+ V(sticky_string, "sticky") \
+ V(harmony_regexps_string, "harmony_regexps") \
V(input_string, "input") \
V(index_string, "index") \
V(last_index_string, "lastIndex") \
V(object_string, "object") \
- V(literals_string, "literals") \
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
+ V(Map_string, "Map") \
+ V(Set_string, "Set") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
V(for_string, "for") \
V(for_api_string, "for_api") \
V(for_intern_string, "for_intern") \
V(private_api_string, "private_api") \
V(private_intern_string, "private_intern") \
V(Date_string, "Date") \
- V(to_string_string, "toString") \
V(char_at_string, "CharAt") \
V(undefined_string, "undefined") \
V(value_of_string, "valueOf") \
V(stack_string, "stack") \
V(toJSON_string, "toJSON") \
- V(InitializeVarGlobal_string, "InitializeVarGlobal") \
- V(InitializeConstGlobal_string, "InitializeConstGlobal") \
V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
V(stack_overflow_string, "kStackOverflowBoilerplate") \
V(illegal_access_string, "illegal access") \
- V(get_string, "get") \
- V(set_string, "set") \
- V(map_field_string, "%map") \
- V(elements_field_string, "%elements") \
- V(length_field_string, "%length") \
V(cell_value_string, "%cell_value") \
- V(function_class_string, "Function") \
V(illegal_argument_string, "illegal argument") \
- V(space_string, " ") \
- V(exec_string, "exec") \
- V(zero_string, "0") \
- V(global_eval_string, "GlobalEval") \
V(identity_hash_string, "v8::IdentityHash") \
V(closure_string, "(closure)") \
V(dot_string, ".") \
@@ -341,7 +334,6 @@ namespace internal {
V(next_string, "next") \
V(byte_length_string, "byteLength") \
V(byte_offset_string, "byteOffset") \
- V(buffer_string, "buffer") \
V(intl_initialized_marker_string, "v8::intl_initialized_marker") \
V(intl_impl_object_string, "v8::intl_object")
@@ -393,18 +385,11 @@ class PromotionQueue {
emergency_stack_ = NULL;
}
- inline void ActivateGuardIfOnTheSamePage();
-
Page* GetHeadPage() {
return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
}
void SetNewLimit(Address limit) {
- if (!guard_) {
- return;
- }
-
- DCHECK(GetHeadPage() == Page::FromAllocationTop(limit));
limit_ = reinterpret_cast<intptr_t*>(limit);
if (limit_ <= rear_) {
@@ -461,8 +446,6 @@ class PromotionQueue {
intptr_t* rear_;
intptr_t* limit_;
- bool guard_;
-
static const int kEntrySizeInWords = 2;
struct Entry {
@@ -731,14 +714,11 @@ class Heap {
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
static const int kNoGCFlags = 0;
- static const int kSweepPreciselyMask = 1;
- static const int kReduceMemoryFootprintMask = 2;
- static const int kAbortIncrementalMarkingMask = 4;
+ static const int kReduceMemoryFootprintMask = 1;
+ static const int kAbortIncrementalMarkingMask = 2;
- // Making the heap iterable requires us to sweep precisely and abort any
- // incremental marking as well.
- static const int kMakeHeapIterableMask =
- kSweepPreciselyMask | kAbortIncrementalMarkingMask;
+ // Making the heap iterable requires us to abort incremental marking.
+ static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
@@ -960,7 +940,7 @@ class Heap {
// Returns deterministic "time" value in ms. Works only with
// FLAG_verify_predictable.
- double synthetic_time() { return allocations_count_ / 100.0; }
+ double synthetic_time() { return allocations_count_ / 2.0; }
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -1068,7 +1048,7 @@ class Heap {
void DisableInlineAllocation();
// Implements the corresponding V8 API function.
- bool IdleNotification(int hint);
+ bool IdleNotification(int idle_time_in_ms);
// Declare all the root indices. This defines the root list order.
enum RootListIndex {
@@ -1716,9 +1696,9 @@ class Heap {
Object* filler);
// Allocate and partially initializes a String. There are two String
- // encodings: ASCII and two byte. These functions allocate a string of the
- // given length and set its map and length fields. The characters of the
- // string are uninitialized.
+ // encodings: one-byte and two-byte. These functions allocate a string of
+ // the given length and set its map and length fields. The characters of
+ // the string are uninitialized.
MUST_USE_RESULT AllocationResult
AllocateRawOneByteString(int length, PretenureFlag pretenure);
MUST_USE_RESULT AllocationResult
@@ -1770,7 +1750,7 @@ class Heap {
// Computes a single character string where the character has code.
- // A cache is used for ASCII codes.
+ // A cache is used for one-byte (Latin1) codes.
MUST_USE_RESULT AllocationResult
LookupSingleCharacterStringFromCode(uint16_t code);
@@ -1946,31 +1926,10 @@ class Heap {
void SelectScavengingVisitorsTable();
- void StartIdleRound() { mark_sweeps_since_idle_round_started_ = 0; }
-
- void FinishIdleRound() {
- mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
- scavenges_since_last_idle_round_ = 0;
- }
-
- bool EnoughGarbageSinceLastIdleRound() {
- return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
- }
-
- // Estimates how many milliseconds a Mark-Sweep would take to complete.
- // In idle notification handler we assume that this function will return:
- // - a number less than 10 for small heaps, which are less than 8Mb.
- // - a number greater than 10 for large heaps, which are greater than 32Mb.
- int TimeMarkSweepWouldTakeInMs() {
- // Rough estimate of how many megabytes of heap can be processed in 1 ms.
- static const int kMbPerMs = 2;
-
- int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
- return heap_size_mb / kMbPerMs;
- }
-
void AdvanceIdleIncrementalMarking(intptr_t step_size);
+ bool WorthActivatingIncrementalMarking();
+
void ClearObjectStats(bool clear_last_time_stats = false);
void set_weak_object_to_code_table(Object* value) {
@@ -2022,13 +1981,8 @@ class Heap {
IncrementalMarking incremental_marking_;
- int number_idle_notifications_;
- unsigned int last_idle_notification_gc_count_;
- bool last_idle_notification_gc_count_init_;
-
- int mark_sweeps_since_idle_round_started_;
+ GCIdleTimeHandler gc_idle_time_handler_;
unsigned int gc_count_at_last_idle_gc_;
- int scavenges_since_last_idle_round_;
// These two counters are monotomically increasing and never reset.
size_t full_codegen_bytes_generated_;
@@ -2046,7 +2000,7 @@ class Heap {
static const int kAllocationSiteScratchpadSize = 256;
int allocation_sites_scratchpad_length_;
- static const int kMaxMarkSweepsInIdleRound = 7;
+ static const int kMaxMarkCompactsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
// Shared state read by the scavenge collector and set by ScavengeObject.
@@ -2067,6 +2021,7 @@ class Heap {
int gc_callbacks_depth_;
friend class AlwaysAllocateScope;
+ friend class Deserializer;
friend class Factory;
friend class GCCallbacksScope;
friend class GCTracer;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index c922e83a67..d72423a60a 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -421,6 +421,11 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
}
+bool IncrementalMarking::ShouldActivate() {
+ return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+}
+
+
bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
@@ -811,7 +816,7 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
- if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
+ if (IsStopped() && ShouldActivate()) {
// TODO(hpayer): Let's play safe for now, but compaction should be
// in principle possible.
Start(PREVENT_COMPACTION);
@@ -821,6 +826,72 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
}
+void IncrementalMarking::SpeedUp() {
+ bool speed_up = false;
+
+ if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+ if (FLAG_trace_gc) {
+ PrintPID("Speed up marking after %d steps\n",
+ static_cast<int>(kMarkingSpeedAccellerationInterval));
+ }
+ speed_up = true;
+ }
+
+ bool space_left_is_very_small =
+ (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+ bool only_1_nth_of_space_that_was_available_still_left =
+ (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+ old_generation_space_available_at_start_of_incremental_);
+
+ if (space_left_is_very_small ||
+ only_1_nth_of_space_that_was_available_still_left) {
+ if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
+ speed_up = true;
+ }
+
+ bool size_of_old_space_multiplied_by_n_during_marking =
+ (heap_->PromotedTotalSize() >
+ (marking_speed_ + 1) *
+ old_generation_space_used_at_start_of_incremental_);
+ if (size_of_old_space_multiplied_by_n_during_marking) {
+ speed_up = true;
+ if (FLAG_trace_gc) {
+ PrintPID("Speed up marking because of heap size increase\n");
+ }
+ }
+
+ int64_t promoted_during_marking =
+ heap_->PromotedTotalSize() -
+ old_generation_space_used_at_start_of_incremental_;
+ intptr_t delay = marking_speed_ * MB;
+ intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+ // We try to scan at least twice the speed that we are allocating.
+ if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+ if (FLAG_trace_gc) {
+ PrintPID("Speed up marking because marker was not keeping up\n");
+ }
+ speed_up = true;
+ }
+
+ if (speed_up) {
+ if (state_ != MARKING) {
+ if (FLAG_trace_gc) {
+ PrintPID("Postponing speeding up marking until marking starts\n");
+ }
+ } else {
+ marking_speed_ += kMarkingSpeedAccelleration;
+ marking_speed_ = static_cast<int>(
+ Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+ if (FLAG_trace_gc) {
+ PrintPID("Marking speed increased to %d\n", marking_speed_);
+ }
+ }
+ }
+}
+
+
void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
bool force_marking) {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
@@ -877,69 +948,9 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
steps_count_++;
- bool speed_up = false;
-
- if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking after %d steps\n",
- static_cast<int>(kMarkingSpeedAccellerationInterval));
- }
- speed_up = true;
- }
-
- bool space_left_is_very_small =
- (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
- bool only_1_nth_of_space_that_was_available_still_left =
- (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
- old_generation_space_available_at_start_of_incremental_);
-
- if (space_left_is_very_small ||
- only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_gc)
- PrintPID("Speed up marking because of low space left\n");
- speed_up = true;
- }
-
- bool size_of_old_space_multiplied_by_n_during_marking =
- (heap_->PromotedTotalSize() >
- (marking_speed_ + 1) *
- old_generation_space_used_at_start_of_incremental_);
- if (size_of_old_space_multiplied_by_n_during_marking) {
- speed_up = true;
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking because of heap size increase\n");
- }
- }
-
- int64_t promoted_during_marking =
- heap_->PromotedTotalSize() -
- old_generation_space_used_at_start_of_incremental_;
- intptr_t delay = marking_speed_ * MB;
- intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
- // We try to scan at at least twice the speed that we are allocating.
- if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
- if (FLAG_trace_gc) {
- PrintPID("Speed up marking because marker was not keeping up\n");
- }
- speed_up = true;
- }
-
- if (speed_up) {
- if (state_ != MARKING) {
- if (FLAG_trace_gc) {
- PrintPID("Postponing speeding up marking until marking starts\n");
- }
- } else {
- marking_speed_ += kMarkingSpeedAccelleration;
- marking_speed_ = static_cast<int>(
- Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
- if (FLAG_trace_gc) {
- PrintPID("Marking speed increased to %d\n", marking_speed_);
- }
- }
- }
+ // Speed up marking if we are marking too slowly or if we are almost done
+ // with marking.
+ SpeedUp();
double end = base::OS::TimeCurrentMillis();
double duration = (end - start);
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index c054fbd40f..e4a8e972ca 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -44,6 +44,8 @@ class IncrementalMarking {
bool WorthActivating();
+ bool ShouldActivate();
+
enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
void Start(CompactionFlag flag = ALLOW_COMPACTION);
@@ -166,6 +168,8 @@ class IncrementalMarking {
private:
int64_t SpaceLeftInOldSpace();
+ void SpeedUp();
+
void ResetStepCounters();
void StartMarking(CompactionFlag flag);
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 934fce847d..66b0a59cec 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -5,8 +5,6 @@
#ifndef V8_HEAP_MARK_COMPACT_INL_H_
#define V8_HEAP_MARK_COMPACT_INL_H_
-#include <memory.h>
-
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
@@ -23,7 +21,6 @@ MarkBit Marking::MarkBitFrom(Address addr) {
void MarkCompactCollector::SetFlags(int flags) {
- sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
abort_incremental_marking_ =
((flags & Heap::kAbortIncrementalMarkingMask) != 0);
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index abb4e1beb8..9f9a658c1c 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/base/atomicops.h"
+#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
@@ -19,8 +20,8 @@
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -40,7 +41,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
- sweep_precisely_(false),
reduce_memory_footprint_(false),
abort_incremental_marking_(false),
marking_parity_(ODD_MARKING_PARITY),
@@ -199,7 +199,6 @@ static void VerifyEvacuation(NewSpace* space) {
static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
- if (!space->swept_precisely()) return;
if (FLAG_use_allocation_folding &&
(space == heap->old_pointer_space() || space == heap->old_data_space())) {
return;
@@ -542,7 +541,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
private:
// v8::Task overrides.
- virtual void Run() V8_OVERRIDE {
+ virtual void Run() OVERRIDE {
heap_->mark_compact_collector()->SweepInParallel(space_, 0);
heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
}
@@ -1025,8 +1024,7 @@ void MarkCompactCollector::Finish() {
// objects have been marked.
void CodeFlusher::ProcessJSFunctionCandidates() {
- Code* lazy_compile =
- isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
Object* undefined = isolate_->heap()->undefined_value();
JSFunction* candidate = jsfunction_candidates_head_;
@@ -1071,8 +1069,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile =
- isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
@@ -1464,7 +1461,7 @@ class MarkCompactMarkingVisitor
static const int kRegExpCodeThreshold = 5;
static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
- bool is_ascii) {
+ bool is_one_byte) {
// Make sure that the fixed array is in fact initialized on the RegExp.
// We could potentially trigger a GC when initializing the RegExp.
if (HeapObject::cast(re->data())->map()->instance_type() !=
@@ -1474,22 +1471,23 @@ class MarkCompactMarkingVisitor
// Make sure this is a RegExp that actually contains code.
if (re->TypeTag() != JSRegExp::IRREGEXP) return;
- Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
+ Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
if (!code->IsSmi() &&
HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
// Save a copy that can be reinstated if we need the code again.
- re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
+ re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
// Saving a copy might create a pointer into compaction candidate
// that was not observed by marker. This might happen if JSRegExp data
// was marked through the compilation cache before marker reached JSRegExp
// object.
FixedArray* data = FixedArray::cast(re->data());
- Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
+ Object** slot =
+ data->data_start() + JSRegExp::saved_code_index(is_one_byte);
heap->mark_compact_collector()->RecordSlot(slot, slot, code);
// Set a number in the 0-255 range to guarantee no smi overflow.
- re->SetDataAt(JSRegExp::code_index(is_ascii),
+ re->SetDataAt(JSRegExp::code_index(is_one_byte),
Smi::FromInt(heap->sweep_generation() & 0xff));
} else if (code->IsSmi()) {
int value = Smi::cast(code)->value();
@@ -1501,9 +1499,9 @@ class MarkCompactMarkingVisitor
// Check if we should flush now.
if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
- re->SetDataAt(JSRegExp::code_index(is_ascii),
+ re->SetDataAt(JSRegExp::code_index(is_one_byte),
Smi::FromInt(JSRegExp::kUninitializedValue));
- re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
+ re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
Smi::FromInt(JSRegExp::kUninitializedValue));
}
}
@@ -1523,7 +1521,7 @@ class MarkCompactMarkingVisitor
return;
}
JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
- // Flush code or set age on both ASCII and two byte code.
+ // Flush code or set age on both one byte and two byte code.
UpdateRegExpCodeAgeAndFlush(heap, re, true);
UpdateRegExpCodeAgeAndFlush(heap, re, false);
// Visit the fields of the RegExp, including the updated FixedArray.
@@ -1926,7 +1924,7 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
int offset = 0;
while (grey_objects != 0) {
- int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
+ int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
grey_objects >>= trailing_zeros;
offset += trailing_zeros;
MarkBit markbit(cell, 1 << offset, false);
@@ -1965,7 +1963,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
int offset = 0;
while (current_cell != 0) {
- int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
+ int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
current_cell >>= trailing_zeros;
offset += trailing_zeros;
Address address = cell_base + offset * kPointerSize;
@@ -2724,7 +2722,8 @@ void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
DCHECK(entries->is_code_at(i));
Code* code = entries->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
- code->set_marked_for_deoptimization(true);
+ DependentCode::SetMarkedForDeoptimization(
+ code, static_cast<DependentCode::DependencyGroup>(g));
code->InvalidateEmbeddedObjects();
have_code_to_deoptimize_ = true;
}
@@ -2899,10 +2898,7 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
Memory::Object_at(dst_slot) = value;
- // We special case ConstantPoolArrays below since they could contain
- // integers value entries which look like tagged pointers.
- // TODO(mstarzinger): restructure this code to avoid this special-casing.
- if (!src->IsConstantPoolArray()) {
+ if (!src->MayContainRawValues()) {
RecordMigratedSlot(value, dst_slot);
}
@@ -2920,6 +2916,9 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
SlotsBuffer::IGNORE_OVERFLOW);
}
} else if (dst->IsConstantPoolArray()) {
+ // We special case ConstantPoolArrays since they could contain integer
+ // value entries which look like tagged pointers.
+ // TODO(mstarzinger): restructure this code to avoid this special-casing.
ConstantPoolArray* array = ConstantPoolArray::cast(dst);
ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
while (!code_iter.is_finished()) {
@@ -3124,7 +3123,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
AlwaysAllocateScope always_allocate(isolate());
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
- p->MarkSweptPrecisely();
+ p->SetWasSwept();
int offsets[16];
@@ -3288,10 +3287,7 @@ static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
}
-// Sweep a space precisely. After this has been done the space can
-// be iterated precisely, hitting only the live objects. Code space
-// is always swept precisely because we want to be able to iterate
-// over it. Map space is swept precisely, because it is not compacted.
+// Sweeps a page. After sweeping the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
// Returns the size of the biggest continuous freed memory chunk in bytes.
@@ -3299,8 +3295,8 @@ template <SweepingMode sweeping_mode,
MarkCompactCollector::SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeSpaceTreatmentMode free_space_mode>
-static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
- ObjectVisitor* v) {
+static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
+ ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
space->identity() == CODE_SPACE);
@@ -3382,7 +3378,7 @@ static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
// sweeping by the main thread.
p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
} else {
- p->MarkSweptPrecisely();
+ p->SetWasSwept();
}
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}
@@ -3619,22 +3615,24 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
switch (space->identity()) {
case OLD_DATA_SPACE:
- SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+ Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+ &updating_visitor);
break;
case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
- space, NULL, p, &updating_visitor);
+ Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+ &updating_visitor);
break;
case CODE_SPACE:
if (FLAG_zap_code_space) {
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
- space, NULL, p, &updating_visitor);
+ Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
+ &updating_visitor);
} else {
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
- space, NULL, p, &updating_visitor);
+ Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+ &updating_visitor);
}
break;
default:
@@ -4117,182 +4115,6 @@ static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
}
-static inline Address DigestFreeStart(Address approximate_free_start,
- uint32_t free_start_cell) {
- DCHECK(free_start_cell != 0);
-
- // No consecutive 1 bits.
- DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
-
- int offsets[16];
- uint32_t cell = free_start_cell;
- int offset_of_last_live;
- if ((cell & 0x80000000u) != 0) {
- // This case would overflow below.
- offset_of_last_live = 31;
- } else {
- // Remove all but one bit, the most significant. This is an optimization
- // that may or may not be worthwhile.
- cell |= cell >> 16;
- cell |= cell >> 8;
- cell |= cell >> 4;
- cell |= cell >> 2;
- cell |= cell >> 1;
- cell = (cell + 1) >> 1;
- int live_objects = MarkWordToObjectStarts(cell, offsets);
- DCHECK(live_objects == 1);
- offset_of_last_live = offsets[live_objects - 1];
- }
- Address last_live_start =
- approximate_free_start + offset_of_last_live * kPointerSize;
- HeapObject* last_live = HeapObject::FromAddress(last_live_start);
- Address free_start = last_live_start + last_live->Size();
- return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
- DCHECK(cell != 0);
-
- // No consecutive 1 bits.
- DCHECK((cell & (cell << 1)) == 0);
-
- int offsets[16];
- if (cell == 0x80000000u) { // Avoid overflow below.
- return block_address + 31 * kPointerSize;
- }
- uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
- DCHECK((first_set_bit & cell) == first_set_bit);
- int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
- DCHECK(live_objects == 1);
- USE(live_objects);
- return block_address + offsets[0] * kPointerSize;
-}
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_ON_MAIN_THREAD mode.
-template int MarkCompactCollector::SweepConservatively<
- MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_IN_PARALLEL mode.
-template int MarkCompactCollector::SweepConservatively<
- MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
-
-
-// Sweeps a space conservatively. After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched. A free space is always either ignored or put
-// on the free list, never split up into two parts. This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning. Dead objects other than free
-// spaces will not contain the free space map.
-template <MarkCompactCollector::SweepingParallelism mode>
-int MarkCompactCollector::SweepConservatively(PagedSpace* space,
- FreeList* free_list, Page* p) {
- DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
- DCHECK(
- (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
- (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
- free_list == NULL));
-
- intptr_t freed_bytes = 0;
- intptr_t max_freed_bytes = 0;
- size_t size = 0;
-
- // Skip over all the dead objects at the start of the page and mark them free.
- Address cell_base = 0;
- MarkBit::CellType* cell = NULL;
- MarkBitCellIterator it(p);
- for (; !it.Done(); it.Advance()) {
- cell_base = it.CurrentCellBase();
- cell = it.CurrentCell();
- if (*cell != 0) break;
- }
-
- if (it.Done()) {
- size = p->area_end() - p->area_start();
- freed_bytes =
- Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- DCHECK_EQ(0, p->LiveBytes());
- if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
- // When concurrent sweeping is active, the page will be marked after
- // sweeping by the main thread.
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
- } else {
- p->MarkSweptConservatively();
- }
- return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
- }
-
- // Grow the size of the start-of-page free space a little to get up to the
- // first live object.
- Address free_end = StartOfLiveObject(cell_base, *cell);
- // Free the first free space.
- size = free_end - p->area_start();
- freed_bytes =
- Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-
- // The start of the current free area is represented in undigested form by
- // the address of the last 32-word section that contained a live object and
- // the marking bitmap for that cell, which describes where the live object
- // started. Unless we find a large free space in the bitmap we will not
- // digest this pair into a real address. We start the iteration here at the
- // first word in the marking bit map that indicates a live object.
- Address free_start = cell_base;
- MarkBit::CellType free_start_cell = *cell;
-
- for (; !it.Done(); it.Advance()) {
- cell_base = it.CurrentCellBase();
- cell = it.CurrentCell();
- if (*cell != 0) {
- // We have a live object. Check approximately whether it is more than 32
- // words since the last live object.
- if (cell_base - free_start > 32 * kPointerSize) {
- free_start = DigestFreeStart(free_start, free_start_cell);
- if (cell_base - free_start > 32 * kPointerSize) {
- // Now that we know the exact start of the free space it still looks
- // like we have a large enough free space to be worth bothering with.
- // so now we need to find the start of the first live object at the
- // end of the free space.
- free_end = StartOfLiveObject(cell_base, *cell);
- freed_bytes = Free<mode>(space, free_list, free_start,
- static_cast<int>(free_end - free_start));
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- }
- }
- // Update our undigested record of where the current free area started.
- free_start = cell_base;
- free_start_cell = *cell;
- // Clear marking bits for current cell.
- *cell = 0;
- }
- }
-
- // Handle the free space at the end of the page.
- if (cell_base - free_start > 32 * kPointerSize) {
- free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes = Free<mode>(space, free_list, free_start,
- static_cast<int>(p->area_end() - free_start));
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- }
-
- p->ResetLiveBytes();
- if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
- // When concurrent sweeping is active, the page will be marked after
- // sweeping by the main thread.
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
- } else {
- p->MarkSweptConservatively();
- }
- return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-}
-
-
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int required_freed_bytes) {
int max_freed = 0;
@@ -4319,14 +4141,8 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
? free_list_old_pointer_space_.get()
: free_list_old_data_space_.get();
FreeList private_free_list(space);
- if (space->swept_precisely()) {
- max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
- IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
- space, &private_free_list, page, NULL);
- } else {
- max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
- space, &private_free_list, page);
- }
+ max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
free_list->Concatenate(&private_free_list);
}
return max_freed;
@@ -4334,9 +4150,6 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
- space->set_swept_precisely(sweeper == PRECISE ||
- sweeper == CONCURRENT_PRECISE ||
- sweeper == PARALLEL_PRECISE);
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4354,8 +4167,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
// Clear sweeping flags indicating that marking bits are still intact.
- p->ClearSweptPrecisely();
- p->ClearSweptConservatively();
+ p->ClearWasSwept();
if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
p->IsEvacuationCandidate()) {
@@ -4381,41 +4193,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
}
switch (sweeper) {
- case CONCURRENT_CONSERVATIVE:
- case PARALLEL_CONSERVATIVE: {
- if (!parallel_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
- reinterpret_cast<intptr_t>(p));
- }
- SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
- pages_swept++;
- parallel_sweeping_active = true;
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
- reinterpret_cast<intptr_t>(p));
- }
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
- space->IncreaseUnsweptFreeBytes(p);
- }
- space->set_end_of_unswept_pages(p);
- break;
- }
- case CONCURRENT_PRECISE:
- case PARALLEL_PRECISE:
+ case CONCURRENT_SWEEPING:
+ case PARALLEL_SWEEPING:
if (!parallel_sweeping_active) {
if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
+ PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
reinterpret_cast<intptr_t>(p));
}
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
pages_swept++;
parallel_sweeping_active = true;
} else {
if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+ PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
@@ -4423,20 +4214,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
}
space->set_end_of_unswept_pages(p);
break;
- case PRECISE: {
+ case SEQUENTIAL_SWEEPING: {
if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
- reinterpret_cast<intptr_t>(p));
+ PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
}
if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
} else {
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
pages_swept++;
break;
@@ -4456,17 +4246,14 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
- return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
- type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
- type == MarkCompactCollector::PARALLEL_PRECISE ||
- type == MarkCompactCollector::CONCURRENT_PRECISE;
+ return type == MarkCompactCollector::PARALLEL_SWEEPING ||
+ type == MarkCompactCollector::CONCURRENT_SWEEPING;
}
static bool ShouldWaitForSweeperThreads(
MarkCompactCollector::SweeperType type) {
- return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
- type == MarkCompactCollector::PARALLEL_PRECISE;
+ return type == MarkCompactCollector::PARALLEL_SWEEPING;
}
@@ -4480,16 +4267,9 @@ void MarkCompactCollector::SweepSpaces() {
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
- SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
- if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
- how_to_sweep = PARALLEL_PRECISE;
- }
- if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
- how_to_sweep = CONCURRENT_PRECISE;
- }
- if (sweep_precisely_) how_to_sweep = PRECISE;
+ SweeperType how_to_sweep = CONCURRENT_SWEEPING;
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;
MoveEvacuationCandidatesToEndOfPagesList();
@@ -4520,14 +4300,14 @@ void MarkCompactCollector::SweepSpaces() {
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
- SweepSpace(heap()->code_space(), PRECISE);
+ SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CELL);
- SweepSpace(heap()->cell_space(), PRECISE);
- SweepSpace(heap()->property_cell_space(), PRECISE);
+ SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
+ SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
}
EvacuateNewSpaceAndCandidates();
@@ -4538,7 +4318,7 @@ void MarkCompactCollector::SweepSpaces() {
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_MAP);
- SweepSpace(heap()->map_space(), PRECISE);
+ SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
}
// Deallocate unmarked objects and clear marked bits for marked objects.
@@ -4560,11 +4340,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
Page* p = it.next();
if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
- if (space->swept_precisely()) {
- p->MarkSweptPrecisely();
- } else {
- p->MarkSweptConservatively();
- }
+ p->SetWasSwept();
}
DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index a32c16b6f2..c5087b4ea6 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_
-#include "src/compiler-intrinsics.h"
+#include "src/base/bits.h"
#include "src/heap/spaces.h"
namespace v8 {
@@ -146,7 +146,9 @@ class MarkingDeque {
HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
array_ = obj_low;
- mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
+ mask_ = base::bits::RoundDownToPowerOfTwo32(
+ static_cast<uint32_t>(obj_high - obj_low)) -
+ 1;
top_ = bottom_ = 0;
overflowed_ = false;
}
@@ -545,11 +547,9 @@ class MarkCompactCollector {
void EnableCodeFlushing(bool enable);
enum SweeperType {
- PARALLEL_CONSERVATIVE,
- CONCURRENT_CONSERVATIVE,
- PARALLEL_PRECISE,
- CONCURRENT_PRECISE,
- PRECISE
+ PARALLEL_SWEEPING,
+ CONCURRENT_SWEEPING,
+ SEQUENTIAL_SWEEPING
};
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -562,12 +562,6 @@ class MarkCompactCollector {
void VerifyOmittedMapChecks();
#endif
- // Sweep a single page from the given space conservatively.
- // Returns the size of the biggest continuous freed memory chunk in bytes.
- template <SweepingParallelism type>
- static int SweepConservatively(PagedSpace* space, FreeList* free_list,
- Page* p);
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
return Page::FromAddress(reinterpret_cast<Address>(anchor))
->ShouldSkipEvacuationSlotRecording();
@@ -694,10 +688,6 @@ class MarkCompactCollector {
CollectorState state_;
#endif
- // Global flag that forces sweeping to be precise, so we can traverse the
- // heap.
- bool sweep_precisely_;
-
bool reduce_memory_footprint_;
bool abort_incremental_marking_;
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 8846d27bce..d220118368 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -261,11 +261,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
// to be serialized.
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
(target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
- target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
+ target->ic_state() == POLYMORPHIC ||
+ (heap->flush_monomorphic_ics() && !target->is_weak_stub()) ||
heap->isolate()->serializer_enabled() ||
target->ic_age() != heap->global_ic_age() ||
target->is_invalidated_weak_stub())) {
- IC::Clear(heap->isolate(), rinfo->pc(), rinfo->host()->constant_pool());
+ ICUtility::Clear(heap->isolate(), rinfo->pc(),
+ rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index a316d12dcd..a0fc231d08 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -5,7 +5,6 @@
#include "src/v8.h"
#include "src/heap/objects-visiting.h"
-#include "src/ic-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 56c2bad70c..d81d253e1b 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -8,6 +8,7 @@
#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
+#include "src/msan.h"
#include "src/v8memory.h"
namespace v8 {
@@ -258,6 +259,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
}
@@ -280,6 +282,9 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ // The slow path above ultimately goes through AllocateRaw, so this suffices.
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
return obj;
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 9be53e03f2..ae4048f452 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
#include "src/heap/mark-compact.h"
@@ -47,18 +48,13 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
page->area_end(), kOnePageOnly, size_func);
- DCHECK(page->WasSweptPrecisely() ||
- (static_cast<PagedSpace*>(owner)->swept_precisely() &&
- page->SweepingCompleted()));
+ DCHECK(page->WasSwept() || page->SweepingCompleted());
}
void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
- // Check that we actually can iterate this space.
- DCHECK(space->swept_precisely());
-
space_ = space;
cur_addr_ = cur;
cur_end_ = end;
@@ -83,9 +79,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
if (cur_page == space_->anchor()) return false;
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
- DCHECK(cur_page->WasSweptPrecisely() ||
- (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
- cur_page->SweepingCompleted()));
+ DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
return true;
}
@@ -193,8 +187,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
const size_t commit_size,
size_t* allocated) {
DCHECK(commit_size <= requested_size);
- DCHECK(current_allocation_block_index_ < allocation_list_.length());
- if (requested_size > allocation_list_[current_allocation_block_index_].size) {
+ DCHECK(allocation_list_.length() == 0 ||
+ current_allocation_block_index_ < allocation_list_.length());
+ if (allocation_list_.length() == 0 ||
+ requested_size > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough.
if (!GetNextAllocationBlock(requested_size)) return NULL;
}
@@ -218,7 +214,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
allocation_list_[current_allocation_block_index_].size -= *allocated;
if (*allocated == current.size) {
// This block is used up, get the next one.
- if (!GetNextAllocationBlock(0)) return NULL;
+ GetNextAllocationBlock(0);
}
return current.start;
}
@@ -459,7 +455,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
- chunk->SetFlag(WAS_SWEPT_PRECISELY);
+ chunk->SetFlag(WAS_SWEPT);
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -668,7 +664,6 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
MemoryChunk* result = MemoryChunk::Initialize(
heap, base, chunk_size, area_start, area_end, executable, owner);
result->set_reserved_memory(&reservation);
- MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
return result;
}
@@ -886,7 +881,6 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Executability executable)
: Space(heap, id, executable),
free_list_(this),
- swept_precisely_(true),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL),
emergency_memory_(NULL) {
@@ -936,7 +930,7 @@ size_t PagedSpace::CommittedPhysicalMemory() {
Object* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called on precisely swept spaces.
+ // Note: this function can only be called on iterable spaces.
DCHECK(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
@@ -990,10 +984,13 @@ bool PagedSpace::Expand() {
intptr_t PagedSpace::SizeOfFirstPage() {
+ // If using an ool constant pool then transfer the constant pool allowance
+ // from the code space to the old pointer space.
+ static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 112 * kPointerSize * KB;
+ size = (112 + constant_pool_delta) * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
@@ -1015,9 +1012,9 @@ intptr_t PagedSpace::SizeOfFirstPage() {
// upgraded to handle small pages.
size = AreaSize();
} else {
- size =
- RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
- kPointerSize);
+ size = RoundUp((480 - constant_pool_delta) * KB *
+ FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+ kPointerSize);
}
break;
}
@@ -1126,9 +1123,6 @@ void PagedSpace::Print() {}
#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
- // We can only iterate over the pages if they were swept precisely.
- if (!swept_precisely_) return;
-
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
@@ -1138,7 +1132,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
- CHECK(page->WasSweptPrecisely());
+ CHECK(page->WasSwept());
HeapObjectIterator it(page, NULL);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
@@ -1196,7 +1190,7 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(IsPowerOf2(maximum_semispace_capacity));
+ DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
// Allocate and set up the histogram arrays if necessary.
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
@@ -1265,14 +1259,15 @@ void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
- DCHECK(Capacity() < MaximumCapacity());
- int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
+ DCHECK(TotalCapacity() < MaximumCapacity());
+ int new_capacity =
+ Min(MaximumCapacity(), 2 * static_cast<int>(TotalCapacity()));
if (to_space_.GrowTo(new_capacity)) {
// Only grow from space if we managed to grow to-space.
if (!from_space_.GrowTo(new_capacity)) {
// If we managed to grow to-space but couldn't grow from-space,
// attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.Capacity())) {
+ if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
V8::FatalProcessOutOfMemory("Failed to grow new space.");
@@ -1284,16 +1279,16 @@ void NewSpace::Grow() {
void NewSpace::Shrink() {
- int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
+ int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
- if (rounded_new_capacity < Capacity() &&
+ if (rounded_new_capacity < TotalCapacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from-space if we managed to shrink to-space.
from_space_.Reset();
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
// If we managed to shrink to-space but couldn't shrink from
// space, attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.Capacity())) {
+ if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
V8::FatalProcessOutOfMemory("Failed to shrink new space.");
@@ -1365,7 +1360,6 @@ bool NewSpace::AddFreshPage() {
Address limit = NewSpacePage::FromLimit(top)->area_end();
if (heap()->gc_state() == Heap::SCAVENGE) {
heap()->promotion_queue()->SetNewLimit(limit);
- heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
}
int remaining_in_page = static_cast<int>(limit - top);
@@ -1472,9 +1466,9 @@ void SemiSpace::SetUp(Address start, int initial_capacity,
// space is used as the marking stack. It requires contiguous memory
// addresses.
DCHECK(maximum_capacity >= Page::kPageSize);
- initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- capacity_ = initial_capacity;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+ total_capacity_ = initial_capacity;
+ maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
maximum_committed_ = 0;
committed_ = false;
start_ = start;
@@ -1487,15 +1481,15 @@ void SemiSpace::SetUp(Address start, int initial_capacity,
void SemiSpace::TearDown() {
start_ = NULL;
- capacity_ = 0;
+ total_capacity_ = 0;
}
bool SemiSpace::Commit() {
DCHECK(!is_committed());
- int pages = capacity_ / Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_,
- executable())) {
+ int pages = total_capacity_ / Page::kPageSize;
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ start_, total_capacity_, executable())) {
return false;
}
@@ -1507,7 +1501,7 @@ bool SemiSpace::Commit() {
current = new_page;
}
- SetCapacity(capacity_);
+ SetCapacity(total_capacity_);
committed_ = true;
Reset();
return true;
@@ -1516,8 +1510,9 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
- Address start = start_ + maximum_capacity_ - capacity_;
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
+ Address start = start_ + maximum_total_capacity_ - total_capacity_;
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
+ total_capacity_)) {
return false;
}
anchor()->set_next_page(anchor());
@@ -1544,16 +1539,16 @@ bool SemiSpace::GrowTo(int new_capacity) {
if (!Commit()) return false;
}
DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
- DCHECK(new_capacity <= maximum_capacity_);
- DCHECK(new_capacity > capacity_);
- int pages_before = capacity_ / Page::kPageSize;
+ DCHECK(new_capacity <= maximum_total_capacity_);
+ DCHECK(new_capacity > total_capacity_);
+ int pages_before = total_capacity_ / Page::kPageSize;
int pages_after = new_capacity / Page::kPageSize;
- size_t delta = new_capacity - capacity_;
+ size_t delta = new_capacity - total_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_ + capacity_, delta, executable())) {
+ start_ + total_capacity_, delta, executable())) {
return false;
}
SetCapacity(new_capacity);
@@ -1576,10 +1571,10 @@ bool SemiSpace::GrowTo(int new_capacity) {
bool SemiSpace::ShrinkTo(int new_capacity) {
DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
- DCHECK(new_capacity >= initial_capacity_);
- DCHECK(new_capacity < capacity_);
+ DCHECK(new_capacity >= initial_total_capacity_);
+ DCHECK(new_capacity < total_capacity_);
if (is_committed()) {
- size_t delta = capacity_ - new_capacity;
+ size_t delta = total_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
@@ -1659,9 +1654,9 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
void SemiSpace::SetCapacity(int new_capacity) {
- capacity_ = new_capacity;
- if (capacity_ > maximum_committed_) {
- maximum_committed_ = capacity_;
+ total_capacity_ = new_capacity;
+ if (total_capacity_ > maximum_committed_) {
+ maximum_committed_ = total_capacity_;
}
}
@@ -1902,11 +1897,11 @@ static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
void NewSpace::ReportStatistics() {
#ifdef DEBUG
if (FLAG_heap_stats) {
- float pct = static_cast<float>(Available()) / Capacity();
+ float pct = static_cast<float>(Available()) / TotalCapacity();
PrintF(" capacity: %" V8_PTR_PREFIX
"d"
", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Available(), static_cast<int>(pct * 100));
+ TotalCapacity(), Available(), static_cast<int>(pct * 100));
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (allocated_histogram_[i].number() > 0) {
@@ -2734,7 +2729,9 @@ void PagedSpace::ReportStatistics() {
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- if (!swept_precisely_) return;
+ if (heap()->mark_compact_collector()->sweeping_in_progress()) {
+ heap()->mark_compact_collector()->EnsureSweepingCompleted();
+ }
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
@@ -2843,9 +2840,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
- if (Size() + object_size > max_capacity_) {
- return AllocationResult::Retry(identity());
- }
+ if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
@@ -2875,6 +2870,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
HeapObject* object = page->GetObject();
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
+
if (Heap::ShouldZapGarbage()) {
// Make the object consistent so the heap can be verified in OldSpaceStep.
// We only need to do this in debug builds or if verify_heap is on.
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 312d75f52e..ef55357163 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/base/atomicops.h"
+#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/hashmap.h"
#include "src/list.h"
@@ -373,12 +374,9 @@ class MemoryChunk {
EVACUATION_CANDIDATE,
RESCAN_ON_EVACUATION,
- // Pages swept precisely can be iterated, hitting only the live objects.
- // Whereas those swept conservatively cannot be iterated over. Both flags
- // indicate that marking bits have been cleared by the sweeper, otherwise
- // marking bits are still intact.
- WAS_SWEPT_PRECISELY,
- WAS_SWEPT_CONSERVATIVELY,
+ // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+ // otherwise marking bits are still intact.
+ WAS_SWEPT,
// Large objects can have a progress bar in their page header. These object
// are scanned in increments and will be kept black while being scanned.
@@ -765,15 +763,9 @@ class Page : public MemoryChunk {
void InitializeAsAnchor(PagedSpace* owner);
- bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
- bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
- bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
-
- void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
- void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
-
- void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
- void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+ bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
+ void SetWasSwept() { SetFlag(WAS_SWEPT); }
+ void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
void ResetFreeListStatistics();
@@ -888,6 +880,10 @@ class CodeRange {
DCHECK(valid());
return static_cast<Address>(code_range_->address());
}
+ size_t size() {
+ DCHECK(valid());
+ return code_range_->size();
+ }
bool contains(Address address) {
if (!valid()) return false;
Address start = static_cast<Address>(code_range_->address());
@@ -1830,14 +1826,11 @@ class PagedSpace : public Space {
static void ResetCodeStatistics(Isolate* isolate);
#endif
- bool swept_precisely() { return swept_precisely_; }
- void set_swept_precisely(bool b) { swept_precisely_ = b; }
-
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
static bool ShouldBeSweptBySweeperThreads(Page* p) {
return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
+ !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
}
void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
@@ -1907,12 +1900,8 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
- // This space was swept precisely, hence it is iterable.
- bool swept_precisely_;
-
// The number of free bytes which could be reclaimed by advancing the
- // concurrent sweeper threads. This is only an estimation because concurrent
- // sweeping is done conservatively.
+ // concurrent sweeper threads.
intptr_t unswept_free_bytes_;
// The sweeper threads iterate over the list of pointer and data space pages
@@ -2028,7 +2017,7 @@ class NewSpacePage : public MemoryChunk {
Address address() { return reinterpret_cast<Address>(this); }
- // Finds the NewSpacePage containg the given address.
+ // Finds the NewSpacePage containing the given address.
static inline NewSpacePage* FromAddress(Address address_in_page) {
Address page_start =
reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
@@ -2176,14 +2165,14 @@ class SemiSpace : public Space {
inline static void AssertValidRange(Address from, Address to) {}
#endif
- // Returns the current capacity of the semi space.
- int Capacity() { return capacity_; }
+ // Returns the current total capacity of the semispace.
+ int TotalCapacity() { return total_capacity_; }
- // Returns the maximum capacity of the semi space.
- int MaximumCapacity() { return maximum_capacity_; }
+ // Returns the maximum total capacity of the semispace.
+ int MaximumTotalCapacity() { return maximum_total_capacity_; }
- // Returns the initial capacity of the semi space.
- int InitialCapacity() { return initial_capacity_; }
+ // Returns the initial capacity of the semispace.
+ int InitialTotalCapacity() { return initial_total_capacity_; }
SemiSpaceId id() { return id_; }
@@ -2205,10 +2194,10 @@ class SemiSpace : public Space {
NewSpacePage* anchor() { return &anchor_; }
- // The current and maximum capacity of the space.
- int capacity_;
- int maximum_capacity_;
- int initial_capacity_;
+ // The current and maximum total capacity of the space.
+ int total_capacity_;
+ int maximum_total_capacity_;
+ int initial_total_capacity_;
intptr_t maximum_committed_;
@@ -2378,22 +2367,24 @@ class NewSpace : public Space {
// new space, which can't get as big as the other spaces then this is useful:
int SizeAsInt() { return static_cast<int>(Size()); }
- // Return the current capacity of a semispace.
- intptr_t EffectiveCapacity() {
- SLOW_DCHECK(to_space_.Capacity() == from_space_.Capacity());
- return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
+ // Return the allocatable capacity of a semispace.
+ intptr_t Capacity() {
+ SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
+ return (to_space_.TotalCapacity() / Page::kPageSize) *
+ NewSpacePage::kAreaSize;
}
- // Return the current capacity of a semispace.
- intptr_t Capacity() {
- DCHECK(to_space_.Capacity() == from_space_.Capacity());
- return to_space_.Capacity();
+ // Return the current size of a semispace, allocatable and non-allocatable
+ // memory.
+ intptr_t TotalCapacity() {
+ DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
+ return to_space_.TotalCapacity();
}
// Return the total amount of memory committed for new space.
intptr_t CommittedMemory() {
if (from_space_.is_committed()) return 2 * Capacity();
- return Capacity();
+ return TotalCapacity();
}
// Return the total amount of memory committed for new space.
@@ -2410,16 +2401,18 @@ class NewSpace : public Space {
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
- DCHECK(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
- return to_space_.MaximumCapacity();
+ DCHECK(to_space_.MaximumTotalCapacity() ==
+ from_space_.MaximumTotalCapacity());
+ return to_space_.MaximumTotalCapacity();
}
- bool IsAtMaximumCapacity() { return Capacity() == MaximumCapacity(); }
+ bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
- int InitialCapacity() {
- DCHECK(to_space_.InitialCapacity() == from_space_.InitialCapacity());
- return to_space_.InitialCapacity();
+ int InitialTotalCapacity() {
+ DCHECK(to_space_.InitialTotalCapacity() ==
+ from_space_.InitialTotalCapacity());
+ return to_space_.InitialTotalCapacity();
}
// Return the address of the allocation pointer in the active semispace.
@@ -2636,7 +2629,7 @@ class MapSpace : public PagedSpace {
static const int kMaxMapPageIndex = 1 << 16;
virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(Map::kSize)) {
+ if (base::bits::IsPowerOfTwo32(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
@@ -2671,7 +2664,7 @@ class CellSpace : public PagedSpace {
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(Cell::kSize)) {
+ if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
return RoundDown(size, Cell::kSize);
} else {
return (size / Cell::kSize) * Cell::kSize;
@@ -2696,7 +2689,7 @@ class PropertyCellSpace : public PagedSpace {
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(PropertyCell::kSize)) {
+ if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
return RoundDown(size, PropertyCell::kSize);
} else {
return (size / PropertyCell::kSize) * PropertyCell::kSize;
@@ -2739,6 +2732,8 @@ class LargeObjectSpace : public Space {
MUST_USE_RESULT AllocationResult
AllocateRaw(int object_size, Executability executable);
+ bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
+
// Available bytes for objects in this space.
inline intptr_t Available();
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index b48e1a4049..278e9f2f6e 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -477,19 +477,18 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
- Address start = page->area_start();
- Address end = page->area_end();
if (owner == heap_->map_space()) {
- DCHECK(page->WasSweptPrecisely());
+ DCHECK(page->WasSwept());
HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We skip free space objects.
if (!heap_object->IsFiller()) {
+ DCHECK(heap_object->IsMap());
FindPointersToNewSpaceInRegion(
- heap_object->address() + HeapObject::kHeaderSize,
- heap_object->address() + heap_object->Size(), slot_callback,
- clear_maps);
+ heap_object->address() + Map::kPointerFieldsBeginOffset,
+ heap_object->address() + Map::kPointerFieldsEndOffset,
+ slot_callback, clear_maps);
}
}
} else {
@@ -503,24 +502,17 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
}
- // TODO(hpayer): remove the special casing and merge map and pointer
- // space handling as soon as we removed conservative sweeping.
CHECK(page->owner() == heap_->old_pointer_space());
- if (heap_->old_pointer_space()->swept_precisely()) {
- HeapObjectIterator iterator(page, NULL);
- for (HeapObject* heap_object = iterator.Next();
- heap_object != NULL; heap_object = iterator.Next()) {
- // We iterate over objects that contain new space pointers only.
- if (heap_object->MayContainNewSpacePointers()) {
- FindPointersToNewSpaceInRegion(
- heap_object->address() + HeapObject::kHeaderSize,
- heap_object->address() + heap_object->Size(),
- slot_callback, clear_maps);
- }
+ HeapObjectIterator iterator(page, NULL);
+ for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+ heap_object = iterator.Next()) {
+ // We iterate over objects that contain new space pointers only.
+ if (!heap_object->MayContainRawValues()) {
+ FindPointersToNewSpaceInRegion(
+ heap_object->address() + HeapObject::kHeaderSize,
+ heap_object->address() + heap_object->Size(), slot_callback,
+ clear_maps);
}
- } else {
- FindPointersToNewSpaceInRegion(start, end, slot_callback,
- clear_maps);
}
}
}
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index 794f51855e..be1e17bb41 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-class HInstructionMap V8_FINAL : public ZoneObject {
+class HInstructionMap FINAL : public ZoneObject {
public:
HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
: array_size_(0),
@@ -70,7 +70,7 @@ class HInstructionMap V8_FINAL : public ZoneObject {
};
-class HSideEffectMap V8_FINAL BASE_EMBEDDED {
+class HSideEffectMap FINAL BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index 3cab59c735..8cdeb9992b 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -19,7 +19,7 @@ class OStream;
// which can be used to represent side effects that cannot be expressed using
// the GVNFlags of an HInstruction. These special side effects are tracked by a
// SideEffectsTracker (see below).
-class SideEffects V8_FINAL {
+class SideEffects FINAL {
public:
static const int kNumberOfSpecials = 64 - kNumberOfFlags;
@@ -63,7 +63,7 @@ struct TrackedEffects;
// SideEffects class (see above). This way unrelated global variable/inobject
// field stores don't prevent hoisting and merging of global variable/inobject
// field loads.
-class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
+class SideEffectsTracker FINAL BASE_EMBEDDED {
public:
SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
SideEffects ComputeChanges(HInstruction* instr);
@@ -111,7 +111,7 @@ OStream& operator<<(OStream& os, const TrackedEffects& f);
// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
+class HGlobalValueNumberingPhase FINAL : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index b75bec0f5e..a057217cc5 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/base/bits.h"
#include "src/double.h"
#include "src/factory.h"
#include "src/hydrogen-infer-representation.h"
@@ -845,6 +846,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kStoreNamedGeneric:
case HValue::kStringCharCodeAt:
case HValue::kStringCharFromCode:
+ case HValue::kTailCallThroughMegamorphicCache:
case HValue::kThisFunction:
case HValue::kTypeofIsAndBranch:
case HValue::kUnknownOSRValue:
@@ -1511,17 +1513,8 @@ HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
HValue* value, Representation representation) {
if (FLAG_fold_constants && value->IsConstant()) {
HConstant* c = HConstant::cast(value);
- if (c->HasNumberValue()) {
- double double_res = c->DoubleValue();
- if (representation.IsDouble()) {
- return HConstant::New(zone, context, double_res);
-
- } else if (representation.CanContainDouble(double_res)) {
- return HConstant::New(zone, context,
- static_cast<int32_t>(double_res),
- representation);
- }
- }
+ c = c->CopyToRepresentation(representation, zone);
+ if (c != NULL) return c;
}
return new(zone) HForceRepresentation(value, representation);
}
@@ -1710,6 +1703,15 @@ OStream& HCallStub::PrintDataTo(OStream& os) const { // NOLINT
}
+OStream& HTailCallThroughMegamorphicCache::PrintDataTo(
+ OStream& os) const { // NOLINT
+ for (int i = 0; i < OperandCount(); i++) {
+ os << NameOf(OperandAt(i)) << " ";
+ }
+ return os << "flags: " << flags();
+}
+
+
OStream& HUnknownOSRValue::PrintDataTo(OStream& os) const { // NOLINT
const char* type = "expression";
if (environment_->is_local_index(index_)) type = "local";
@@ -2652,7 +2654,7 @@ OStream& HEnterInlined::PrintDataTo(OStream& os) const { // NOLINT
static bool IsInteger32(double value) {
double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
- return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
+ return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
}
@@ -2813,6 +2815,13 @@ void HConstant::Initialize(Representation r) {
r = Representation::Tagged();
}
}
+ if (r.IsSmi()) {
+ // If we have an existing handle, zap it, because it might be a heap
+ // number which we must not re-use when copying this HConstant to
+ // Tagged representation later, because having Smi representation now
+ // could cause heap object checks not to get emitted.
+ object_ = Unique<Object>(Handle<Object>::null());
+ }
set_representation(r);
SetFlag(kUseGVN);
}
@@ -3583,14 +3592,14 @@ OStream& HTransitionElementsKind::PrintDataTo(OStream& os) const { // NOLINT
OStream& HLoadGlobalCell::PrintDataTo(OStream& os) const { // NOLINT
os << "[" << *cell().handle() << "]";
- if (!details_.IsDontDelete()) os << " (deleteable)";
+ if (details_.IsConfigurable()) os << " (configurable)";
if (details_.IsReadOnly()) os << " (read-only)";
return os;
}
bool HLoadGlobalCell::RequiresHoleCheck() const {
- if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
+ if (!details_.IsConfigurable()) return false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
@@ -3612,7 +3621,7 @@ OStream& HInnerAllocatedObject::PrintDataTo(OStream& os) const { // NOLINT
OStream& HStoreGlobalCell::PrintDataTo(OStream& os) const { // NOLINT
os << "[" << *cell().handle() << "] = " << NameOf(value());
- if (!details_.IsDontDelete()) os << " (deleteable)";
+ if (details_.IsConfigurable()) os << " (configurable)";
if (details_.IsReadOnly()) os << " (read-only)";
return os;
}
@@ -4179,8 +4188,7 @@ HInstruction* HUnaryMathOperation::New(
return H_CONSTANT_DOUBLE(Floor(d));
case kMathClz32: {
uint32_t i = DoubleToUint32(d);
- return H_CONSTANT_INT(
- (i == 0) ? 32 : CompilerIntrinsics::CountLeadingZeros(i));
+ return H_CONSTANT_INT(base::bits::CountLeadingZeros32(i));
}
default:
UNREACHABLE();
@@ -4642,24 +4650,9 @@ HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
}
-HObjectAccess HObjectAccess::ForField(Handle<Map> map,
- LookupResult* lookup,
+HObjectAccess HObjectAccess::ForField(Handle<Map> map, int index,
+ Representation representation,
Handle<String> name) {
- DCHECK(lookup->IsField() || lookup->IsTransitionToField());
- int index;
- Representation representation;
- if (lookup->IsField()) {
- index = lookup->GetLocalFieldIndexFromMap(*map);
- representation = lookup->representation();
- } else {
- Map* transition = lookup->GetTransitionTarget();
- int descriptor = transition->LastAdded();
- index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
- map->inobject_properties();
- PropertyDetails details =
- transition->instance_descriptors()->GetDetails(descriptor);
- representation = details.representation();
- }
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
@@ -4675,9 +4668,8 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map,
HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
- return HObjectAccess(
- kInobject, Cell::kValueOffset, Representation::Tagged(),
- Handle<String>(isolate->heap()->cell_value_string()));
+ return HObjectAccess(kInobject, Cell::kValueOffset, Representation::Tagged(),
+ isolate->factory()->cell_value_string());
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index ed1243507c..695c629a70 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -8,6 +8,7 @@
#include "src/v8.h"
#include "src/allocation.h"
+#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/data-flow.h"
@@ -36,133 +37,134 @@ class LInstruction;
class LChunkBuilder;
class OStream;
-#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
- V(ArithmeticBinaryOperation) \
- V(BinaryOperation) \
- V(BitwiseBinaryOperation) \
- V(ControlInstruction) \
- V(Instruction) \
-
-
-#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
- V(AbnormalExit) \
- V(AccessArgumentsAt) \
- V(Add) \
- V(AllocateBlockContext) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArgumentsObject) \
- V(Bitwise) \
- V(BlockEntry) \
- V(BoundsCheck) \
- V(BoundsCheckBaseIndexInformation) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallJSFunction) \
- V(CallFunction) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CapturedObject) \
- V(Change) \
- V(CheckHeapObject) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CompareHoleAndBranch) \
- V(CompareGeneric) \
- V(CompareMinusZeroAndBranch) \
- V(CompareObjectEqAndBranch) \
- V(CompareMap) \
- V(Constant) \
- V(ConstructDouble) \
- V(Context) \
- V(DateField) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(Div) \
- V(DoubleBits) \
- V(DummyUse) \
- V(EnterInlined) \
- V(EnvironmentMarker) \
- V(ForceRepresentation) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(LeaveInlined) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedGeneric) \
- V(LoadRoot) \
- V(MapEnumLength) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(Mod) \
- V(Mul) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(PushArguments) \
- V(RegExpLiteral) \
- V(Return) \
- V(Ror) \
- V(Sar) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(Shl) \
- V(Shr) \
- V(Simulate) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreFrameContext) \
- V(StoreGlobalCell) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(Sub) \
- V(ThisFunction) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(UseConst) \
+#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
+ V(ArithmeticBinaryOperation) \
+ V(BinaryOperation) \
+ V(BitwiseBinaryOperation) \
+ V(ControlInstruction) \
+ V(Instruction)
+
+
+#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AbnormalExit) \
+ V(AccessArgumentsAt) \
+ V(Add) \
+ V(AllocateBlockContext) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArgumentsObject) \
+ V(Bitwise) \
+ V(BlockEntry) \
+ V(BoundsCheck) \
+ V(BoundsCheckBaseIndexInformation) \
+ V(Branch) \
+ V(CallWithDescriptor) \
+ V(CallJSFunction) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CapturedObject) \
+ V(Change) \
+ V(CheckHeapObject) \
+ V(CheckInstanceType) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CompareHoleAndBranch) \
+ V(CompareGeneric) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareObjectEqAndBranch) \
+ V(CompareMap) \
+ V(Constant) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(Div) \
+ V(DoubleBits) \
+ V(DummyUse) \
+ V(EnterInlined) \
+ V(EnvironmentMarker) \
+ V(ForceRepresentation) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(LeaveInlined) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathFloorOfDiv) \
+ V(MathMinMax) \
+ V(Mod) \
+ V(Mul) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArguments) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(Ror) \
+ V(Sar) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(Shl) \
+ V(Shr) \
+ V(Simulate) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(Sub) \
+ V(TailCallThroughMegamorphicCache) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(UnaryMathOperation) \
+ V(UnknownOSRValue) \
+ V(UseConst) \
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
@@ -189,7 +191,7 @@ class OStream;
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
- virtual bool Is##type() const V8_FINAL V8_OVERRIDE { return true; } \
+ virtual bool Is##type() const FINAL OVERRIDE { return true; } \
static H##type* cast(HValue* value) { \
DCHECK(value->Is##type()); \
return reinterpret_cast<H##type*>(value); \
@@ -198,12 +200,12 @@ class OStream;
#define DECLARE_CONCRETE_INSTRUCTION(type) \
virtual LInstruction* CompileToLithium( \
- LChunkBuilder* builder) V8_FINAL V8_OVERRIDE; \
+ LChunkBuilder* builder) FINAL OVERRIDE; \
static H##type* cast(HValue* value) { \
DCHECK(value->Is##type()); \
return reinterpret_cast<H##type*>(value); \
} \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return HValue::k##type; \
}
@@ -211,7 +213,7 @@ class OStream;
enum PropertyAccessType { LOAD, STORE };
-class Range V8_FINAL : public ZoneObject {
+class Range FINAL : public ZoneObject {
public:
Range()
: lower_(kMinInt),
@@ -315,7 +317,7 @@ class HUseListNode: public ZoneObject {
// We reuse use list nodes behind the scenes as uses are added and deleted.
// This class is the safe way to iterate uses while deleting them.
-class HUseIterator V8_FINAL BASE_EMBEDDED {
+class HUseIterator FINAL BASE_EMBEDDED {
public:
bool Done() { return current_ == NULL; }
void Advance();
@@ -364,7 +366,7 @@ static inline GVNFlag GVNFlagFromInt(int i) {
}
-class DecompositionResult V8_FINAL BASE_EMBEDDED {
+class DecompositionResult FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -453,10 +455,10 @@ class HSourcePosition {
// Offset from the start of the inlined function.
typedef BitField<int, 9, 23> PositionField;
- // On HPositionInfo can use this constructor.
explicit HSourcePosition(int value) : value_(value) { }
friend class HPositionInfo;
+ friend class LCodeGenBase;
// If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
// and PositionField.
@@ -1145,7 +1147,7 @@ class HInstruction : public HValue {
HInstruction* next() const { return next_; }
HInstruction* previous() const { return previous_; }
- virtual OStream& PrintTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintTo(OStream& os) const OVERRIDE; // NOLINT
virtual OStream& PrintDataTo(OStream& os) const; // NOLINT
bool IsLinked() const { return block() != NULL; }
@@ -1166,7 +1168,7 @@ class HInstruction : public HValue {
}
// The position is a write-once variable.
- virtual HSourcePosition position() const V8_OVERRIDE {
+ virtual HSourcePosition position() const OVERRIDE {
return HSourcePosition(position_.position());
}
bool has_position() const {
@@ -1178,7 +1180,7 @@ class HInstruction : public HValue {
position_.set_position(position);
}
- virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
+ virtual HSourcePosition operand_position(int index) const OVERRIDE {
const HSourcePosition pos = position_.operand_position(index);
return pos.IsUnknown() ? position() : pos;
}
@@ -1195,7 +1197,7 @@ class HInstruction : public HValue {
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE;
+ virtual void Verify() OVERRIDE;
#endif
bool CanDeoptimize();
@@ -1213,7 +1215,7 @@ class HInstruction : public HValue {
SetDependsOnFlag(kOsrEntries);
}
- virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
+ virtual void DeleteFromGraph() OVERRIDE { Unlink(); }
private:
void InitializeAsFirst(HBasicBlock* block) {
@@ -1232,8 +1234,8 @@ class HInstruction : public HValue {
template<int V>
class HTemplateInstruction : public HInstruction {
public:
- virtual int OperandCount() const V8_FINAL V8_OVERRIDE { return V; }
- virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ virtual int OperandCount() const FINAL OVERRIDE { return V; }
+ virtual HValue* OperandAt(int i) const FINAL OVERRIDE {
return inputs_[i];
}
@@ -1241,7 +1243,7 @@ class HTemplateInstruction : public HInstruction {
explicit HTemplateInstruction(HType type = HType::Tagged())
: HInstruction(type) {}
- virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
inputs_[i] = value;
}
@@ -1256,7 +1258,7 @@ class HControlInstruction : public HInstruction {
virtual int SuccessorCount() const = 0;
virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
virtual bool KnownSuccessorBlock(HBasicBlock** block) {
*block = NULL;
@@ -1280,7 +1282,7 @@ class HControlInstruction : public HInstruction {
};
-class HSuccessorIterator V8_FINAL BASE_EMBEDDED {
+class HSuccessorIterator FINAL BASE_EMBEDDED {
public:
explicit HSuccessorIterator(const HControlInstruction* instr)
: instr_(instr), current_(0) {}
@@ -1298,18 +1300,18 @@ class HSuccessorIterator V8_FINAL BASE_EMBEDDED {
template<int S, int V>
class HTemplateControlInstruction : public HControlInstruction {
public:
- int SuccessorCount() const V8_OVERRIDE { return S; }
- HBasicBlock* SuccessorAt(int i) const V8_OVERRIDE { return successors_[i]; }
- void SetSuccessorAt(int i, HBasicBlock* block) V8_OVERRIDE {
+ int SuccessorCount() const OVERRIDE { return S; }
+ HBasicBlock* SuccessorAt(int i) const OVERRIDE { return successors_[i]; }
+ void SetSuccessorAt(int i, HBasicBlock* block) OVERRIDE {
successors_[i] = block;
}
- int OperandCount() const V8_OVERRIDE { return V; }
- HValue* OperandAt(int i) const V8_OVERRIDE { return inputs_[i]; }
+ int OperandCount() const OVERRIDE { return V; }
+ HValue* OperandAt(int i) const OVERRIDE { return inputs_[i]; }
protected:
- void InternalSetOperandAt(int i, HValue* value) V8_OVERRIDE {
+ void InternalSetOperandAt(int i, HValue* value) OVERRIDE {
inputs_[i] = value;
}
@@ -1319,9 +1321,9 @@ class HTemplateControlInstruction : public HControlInstruction {
};
-class HBlockEntry V8_FINAL : public HTemplateInstruction<0> {
+class HBlockEntry FINAL : public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -1329,7 +1331,7 @@ class HBlockEntry V8_FINAL : public HTemplateInstruction<0> {
};
-class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
+class HDummyUse FINAL : public HTemplateInstruction<1> {
public:
explicit HDummyUse(HValue* value)
: HTemplateInstruction<1>(HType::Smi()) {
@@ -1341,23 +1343,23 @@ class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
HValue* value() const { return OperandAt(0); }
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(DummyUse);
};
// Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
+class HDebugBreak FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -1365,28 +1367,28 @@ class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
};
-class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
+class HGoto FINAL : public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
SetSuccessorAt(0, target);
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
*block = FirstSuccessor();
return true;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
+class HDeoptimize FINAL : public HTemplateControlInstruction<1, 0> {
public:
static HDeoptimize* New(Zone* zone,
HValue* context,
@@ -1396,12 +1398,12 @@ class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
return new(zone) HDeoptimize(reason, type, unreachable_continuation);
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
*block = NULL;
return true;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -1433,13 +1435,13 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
SetSuccessorAt(1, false_target);
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HValue* value() const { return OperandAt(0); }
};
-class HBranch V8_FINAL : public HUnaryControlInstruction {
+class HBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
@@ -1448,14 +1450,14 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
ToBooleanStub::Types,
HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE;
+ virtual Representation observed_input_representation(int index) OVERRIDE;
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1477,13 +1479,13 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HCompareMap V8_FINAL : public HUnaryControlInstruction {
+class HCompareMap FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
HBasicBlock*, HBasicBlock*);
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
if (known_successor_index() != kNoKnownSuccessorIndex) {
*block = SuccessorAt(known_successor_index());
return true;
@@ -1492,7 +1494,7 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
return false;
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
static const int kNoKnownSuccessorIndex = -1;
int known_successor_index() const { return known_successor_index_; }
@@ -1503,7 +1505,7 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
Unique<Map> map() const { return map_; }
bool map_is_stable() const { return map_is_stable_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -1530,20 +1532,20 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
};
-class HContext V8_FINAL : public HTemplateInstruction<0> {
+class HContext FINAL : public HTemplateInstruction<0> {
public:
static HContext* New(Zone* zone) {
return new(zone) HContext();
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Context)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HContext() {
@@ -1551,22 +1553,22 @@ class HContext V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
+class HReturn FINAL : public HTemplateControlInstruction<0, 3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// TODO(titzer): require an Int32 input for faster returns.
if (index == 2) return Representation::Smi();
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HValue* value() const { return OperandAt(0); }
HValue* context() const { return OperandAt(1); }
@@ -1583,11 +1585,11 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
};
-class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> {
+class HAbnormalExit FINAL : public HTemplateControlInstruction<0, 0> {
public:
DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -1609,15 +1611,15 @@ class HUnaryOperation : public HTemplateInstruction<1> {
}
HValue* value() const { return OperandAt(0); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
};
-class HUseConst V8_FINAL : public HUnaryOperation {
+class HUseConst FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -1628,18 +1630,18 @@ class HUseConst V8_FINAL : public HUnaryOperation {
};
-class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
+class HForceRepresentation FINAL : public HTemplateInstruction<1> {
public:
static HInstruction* New(Zone* zone, HValue* context, HValue* value,
Representation required_representation);
HValue* value() const { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation(); // Same as the output representation.
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
@@ -1651,7 +1653,7 @@ class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
};
-class HChange V8_FINAL : public HUnaryOperation {
+class HChange FINAL : public HUnaryOperation {
public:
HChange(HValue* value,
Representation to,
@@ -1681,46 +1683,46 @@ class HChange V8_FINAL : public HUnaryOperation {
return CheckUsesForFlag(kAllowUndefinedAsNaN);
}
- virtual HType CalculateInferredType() V8_OVERRIDE;
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HType CalculateInferredType() OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
Representation from() const { return value()->representation(); }
Representation to() const { return representation(); }
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return from();
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const V8_OVERRIDE {
+ virtual bool IsDeletable() const OVERRIDE {
return !from().IsTagged() || value()->type().IsSmi();
}
};
-class HClampToUint8 V8_FINAL : public HUnaryOperation {
+class HClampToUint8 FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HClampToUint8(HValue* value)
@@ -1730,16 +1732,16 @@ class HClampToUint8 V8_FINAL : public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HDoubleBits V8_FINAL : public HUnaryOperation {
+class HDoubleBits FINAL : public HUnaryOperation {
public:
enum Bits { HIGH, LOW };
DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Double();
}
@@ -1748,7 +1750,7 @@ class HDoubleBits V8_FINAL : public HUnaryOperation {
Bits bits() { return bits_; }
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
}
@@ -1759,17 +1761,17 @@ class HDoubleBits V8_FINAL : public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
Bits bits_;
};
-class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
+class HConstructDouble FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Integer32();
}
@@ -1779,7 +1781,7 @@ class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
HValue* lo() { return OperandAt(1); }
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HConstructDouble(HValue* hi, HValue* lo) {
@@ -1789,7 +1791,7 @@ class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, lo);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
@@ -1799,7 +1801,7 @@ enum RemovableSimulate {
};
-class HSimulate V8_FINAL : public HInstruction {
+class HSimulate FINAL : public HInstruction {
public:
HSimulate(BailoutId ast_id,
int pop_count,
@@ -1814,7 +1816,7 @@ class HSimulate V8_FINAL : public HInstruction {
done_with_replay_(false) {}
~HSimulate() {}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
bool HasAstId() const { return !ast_id_.IsNone(); }
BailoutId ast_id() const { return ast_id_; }
@@ -1844,13 +1846,13 @@ class HSimulate V8_FINAL : public HInstruction {
}
return -1;
}
- virtual int OperandCount() const V8_OVERRIDE { return values_.length(); }
- virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ virtual int OperandCount() const OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const OVERRIDE {
return values_[index];
}
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -1863,13 +1865,13 @@ class HSimulate V8_FINAL : public HInstruction {
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE;
+ virtual void Verify() OVERRIDE;
void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
Handle<JSFunction> closure() const { return closure_; }
#endif
protected:
- virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
+ virtual void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
values_[index] = value;
}
@@ -1903,7 +1905,7 @@ class HSimulate V8_FINAL : public HInstruction {
};
-class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
+class HEnvironmentMarker FINAL : public HTemplateInstruction<1> {
public:
enum Kind { BIND, LOOKUP };
@@ -1916,11 +1918,11 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
next_simulate_ = simulate;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
#ifdef DEBUG
void set_closure(Handle<JSFunction> closure) {
@@ -1947,7 +1949,7 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
};
-class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
+class HStackCheck FINAL : public HTemplateInstruction<1> {
public:
enum Type {
kFunctionEntry,
@@ -1958,7 +1960,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -1994,30 +1996,29 @@ enum InliningKind {
class HArgumentsObject;
+class HConstant;
-class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
+class HEnterInlined FINAL : public HTemplateInstruction<0> {
public:
- static HEnterInlined* New(Zone* zone,
- HValue* context,
- BailoutId return_id,
+ static HEnterInlined* New(Zone* zone, HValue* context, BailoutId return_id,
Handle<JSFunction> closure,
- int arguments_count,
+ HConstant* closure_context, int arguments_count,
FunctionLiteral* function,
- InliningKind inlining_kind,
- Variable* arguments_var,
+ InliningKind inlining_kind, Variable* arguments_var,
HArgumentsObject* arguments_object) {
- return new(zone) HEnterInlined(return_id, closure, arguments_count,
- function, inlining_kind, arguments_var,
- arguments_object, zone);
+ return new (zone) HEnterInlined(return_id, closure, closure_context,
+ arguments_count, function, inlining_kind,
+ arguments_var, arguments_object, zone);
}
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
Handle<JSFunction> closure() const { return closure_; }
+ HConstant* closure_context() const { return closure_context_; }
int arguments_count() const { return arguments_count_; }
bool arguments_pushed() const { return arguments_pushed_; }
void set_arguments_pushed() { arguments_pushed_ = true; }
@@ -2025,7 +2026,7 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
InliningKind inlining_kind() const { return inlining_kind_; }
BailoutId ReturnId() const { return return_id_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -2035,27 +2036,25 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
private:
- HEnterInlined(BailoutId return_id,
- Handle<JSFunction> closure,
- int arguments_count,
- FunctionLiteral* function,
- InliningKind inlining_kind,
- Variable* arguments_var,
- HArgumentsObject* arguments_object,
+ HEnterInlined(BailoutId return_id, Handle<JSFunction> closure,
+ HConstant* closure_context, int arguments_count,
+ FunctionLiteral* function, InliningKind inlining_kind,
+ Variable* arguments_var, HArgumentsObject* arguments_object,
Zone* zone)
: return_id_(return_id),
closure_(closure),
+ closure_context_(closure_context),
arguments_count_(arguments_count),
arguments_pushed_(false),
function_(function),
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
- return_targets_(2, zone) {
- }
+ return_targets_(2, zone) {}
BailoutId return_id_;
Handle<JSFunction> closure_;
+ HConstant* closure_context_;
int arguments_count_;
bool arguments_pushed_;
FunctionLiteral* function_;
@@ -2066,18 +2065,18 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
};
-class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
+class HLeaveInlined FINAL : public HTemplateInstruction<0> {
public:
HLeaveInlined(HEnterInlined* entry,
int drop_count)
: entry_(entry),
drop_count_(drop_count) { }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual int argument_delta() const V8_OVERRIDE {
+ virtual int argument_delta() const OVERRIDE {
return entry_->arguments_pushed() ? -drop_count_ : 0;
}
@@ -2089,7 +2088,7 @@ class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
};
-class HPushArguments V8_FINAL : public HInstruction {
+class HPushArguments FINAL : public HInstruction {
public:
static HPushArguments* New(Zone* zone, HValue* context) {
return new(zone) HPushArguments(zone);
@@ -2124,17 +2123,17 @@ class HPushArguments V8_FINAL : public HInstruction {
return instr;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual int argument_delta() const V8_OVERRIDE { return inputs_.length(); }
+ virtual int argument_delta() const OVERRIDE { return inputs_.length(); }
HValue* argument(int i) { return OperandAt(i); }
- virtual int OperandCount() const V8_FINAL V8_OVERRIDE {
+ virtual int OperandCount() const FINAL OVERRIDE {
return inputs_.length();
}
- virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ virtual HValue* OperandAt(int i) const FINAL OVERRIDE {
return inputs_[i];
}
@@ -2143,7 +2142,7 @@ class HPushArguments V8_FINAL : public HInstruction {
DECLARE_CONCRETE_INSTRUCTION(PushArguments)
protected:
- virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
inputs_[i] = value;
}
@@ -2157,18 +2156,18 @@ class HPushArguments V8_FINAL : public HInstruction {
};
-class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
+class HThisFunction FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HThisFunction() {
@@ -2176,11 +2175,11 @@ class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HDeclareGlobals V8_FINAL : public HUnaryOperation {
+class HDeclareGlobals FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
Handle<FixedArray>,
@@ -2192,7 +2191,7 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -2221,7 +2220,7 @@ class HCall : public HTemplateInstruction<V> {
this->SetAllSideEffects();
}
- virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ virtual HType CalculateInferredType() FINAL OVERRIDE {
return HType::Tagged();
}
@@ -2229,7 +2228,7 @@ class HCall : public HTemplateInstruction<V> {
return argument_count_;
}
- virtual int argument_delta() const V8_OVERRIDE {
+ virtual int argument_delta() const OVERRIDE {
return -argument_count();
}
@@ -2246,11 +2245,11 @@ class HUnaryCall : public HCall<1> {
}
virtual Representation RequiredInputRepresentation(
- int index) V8_FINAL V8_OVERRIDE {
+ int index) FINAL OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HValue* value() const { return OperandAt(0); }
};
@@ -2264,10 +2263,10 @@ class HBinaryCall : public HCall<2> {
SetOperandAt(1, second);
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
virtual Representation RequiredInputRepresentation(
- int index) V8_FINAL V8_OVERRIDE {
+ int index) FINAL OVERRIDE {
return Representation::Tagged();
}
@@ -2276,7 +2275,7 @@ class HBinaryCall : public HCall<2> {
};
-class HCallJSFunction V8_FINAL : public HCall<1> {
+class HCallJSFunction FINAL : public HCall<1> {
public:
static HCallJSFunction* New(Zone* zone,
HValue* context,
@@ -2286,17 +2285,17 @@ class HCallJSFunction V8_FINAL : public HCall<1> {
HValue* function() const { return OperandAt(0); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
virtual Representation RequiredInputRepresentation(
- int index) V8_FINAL V8_OVERRIDE {
+ int index) FINAL OVERRIDE {
DCHECK(index == 0);
return Representation::Tagged();
}
bool pass_argument_count() const { return pass_argument_count_; }
- virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+ virtual bool HasStackCheck() FINAL OVERRIDE {
return has_stack_check_;
}
@@ -2319,41 +2318,39 @@ class HCallJSFunction V8_FINAL : public HCall<1> {
};
-class HCallWithDescriptor V8_FINAL : public HInstruction {
+class HCallWithDescriptor FINAL : public HInstruction {
public:
- static HCallWithDescriptor* New(Zone* zone, HValue* context,
- HValue* target,
- int argument_count,
- const InterfaceDescriptor* descriptor,
- const Vector<HValue*>& operands) {
- DCHECK(operands.length() == descriptor->GetEnvironmentLength());
- HCallWithDescriptor* res =
- new(zone) HCallWithDescriptor(target, argument_count,
- descriptor, operands, zone);
+ static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target,
+ int argument_count,
+ CallInterfaceDescriptor descriptor,
+ const Vector<HValue*>& operands) {
+ DCHECK(operands.length() == descriptor.GetEnvironmentLength());
+ HCallWithDescriptor* res = new (zone)
+ HCallWithDescriptor(target, argument_count, descriptor, operands, zone);
return res;
}
- virtual int OperandCount() const V8_FINAL V8_OVERRIDE {
+ virtual int OperandCount() const FINAL OVERRIDE {
return values_.length();
}
- virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ virtual HValue* OperandAt(int index) const FINAL OVERRIDE {
return values_[index];
}
virtual Representation RequiredInputRepresentation(
- int index) V8_FINAL V8_OVERRIDE {
+ int index) FINAL OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
int par_index = index - 1;
- DCHECK(par_index < descriptor_->GetEnvironmentLength());
- return descriptor_->GetParameterRepresentation(par_index);
+ DCHECK(par_index < descriptor_.GetEnvironmentLength());
+ return descriptor_.GetParameterRepresentation(par_index);
}
}
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
- virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ virtual HType CalculateInferredType() FINAL OVERRIDE {
return HType::Tagged();
}
@@ -2361,29 +2358,25 @@ class HCallWithDescriptor V8_FINAL : public HInstruction {
return argument_count_;
}
- virtual int argument_delta() const V8_OVERRIDE {
+ virtual int argument_delta() const OVERRIDE {
return -argument_count_;
}
- const InterfaceDescriptor* descriptor() const {
- return descriptor_;
- }
+ CallInterfaceDescriptor descriptor() const { return descriptor_; }
HValue* target() {
return OperandAt(0);
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
private:
// The argument count includes the receiver.
- HCallWithDescriptor(HValue* target,
- int argument_count,
- const InterfaceDescriptor* descriptor,
- const Vector<HValue*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- values_(descriptor->GetEnvironmentLength() + 1, zone) {
+ HCallWithDescriptor(HValue* target, int argument_count,
+ CallInterfaceDescriptor descriptor,
+ const Vector<HValue*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ values_(descriptor.GetEnvironmentLength() + 1, zone) {
argument_count_ = argument_count;
AddOperand(target, zone);
for (int i = 0; i < operands.length(); i++) {
@@ -2399,17 +2392,17 @@ class HCallWithDescriptor V8_FINAL : public HInstruction {
}
void InternalSetOperandAt(int index,
- HValue* value) V8_FINAL V8_OVERRIDE {
+ HValue* value) FINAL OVERRIDE {
values_[index] = value;
}
- const InterfaceDescriptor* descriptor_;
+ CallInterfaceDescriptor descriptor_;
ZoneList<HValue*> values_;
int argument_count_;
};
-class HInvokeFunction V8_FINAL : public HBinaryCall {
+class HInvokeFunction FINAL : public HBinaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
@@ -2440,7 +2433,7 @@ class HInvokeFunction V8_FINAL : public HBinaryCall {
Handle<JSFunction> known_function() { return known_function_; }
int formal_parameter_count() const { return formal_parameter_count_; }
- virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+ virtual bool HasStackCheck() FINAL OVERRIDE {
return has_stack_check_;
}
@@ -2458,7 +2451,7 @@ class HInvokeFunction V8_FINAL : public HBinaryCall {
};
-class HCallFunction V8_FINAL : public HBinaryCall {
+class HCallFunction FINAL : public HBinaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
@@ -2470,7 +2463,7 @@ class HCallFunction V8_FINAL : public HBinaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
- virtual int argument_delta() const V8_OVERRIDE { return -argument_count(); }
+ virtual int argument_delta() const OVERRIDE { return -argument_count(); }
private:
HCallFunction(HValue* context,
@@ -2483,7 +2476,7 @@ class HCallFunction V8_FINAL : public HBinaryCall {
};
-class HCallNew V8_FINAL : public HBinaryCall {
+class HCallNew FINAL : public HBinaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
@@ -2498,7 +2491,7 @@ class HCallNew V8_FINAL : public HBinaryCall {
};
-class HCallNewArray V8_FINAL : public HBinaryCall {
+class HCallNewArray FINAL : public HBinaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallNewArray,
HValue*,
@@ -2508,7 +2501,7 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
HValue* context() { return first(); }
HValue* constructor() { return second(); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
ElementsKind elements_kind() const { return elements_kind_; }
@@ -2524,14 +2517,14 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
};
-class HCallRuntime V8_FINAL : public HCall<1> {
+class HCallRuntime FINAL : public HCall<1> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
Handle<String>,
const Runtime::Function*,
int);
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
@@ -2541,7 +2534,7 @@ class HCallRuntime V8_FINAL : public HCall<1> {
save_doubles_ = save_doubles;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -2563,18 +2556,18 @@ class HCallRuntime V8_FINAL : public HCall<1> {
};
-class HMapEnumLength V8_FINAL : public HUnaryOperation {
+class HMapEnumLength FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HMapEnumLength(HValue* value)
@@ -2584,11 +2577,11 @@ class HMapEnumLength V8_FINAL : public HUnaryOperation {
SetDependsOnFlag(kMaps);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
+class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -2598,9 +2591,9 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
HValue* context() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -2624,11 +2617,11 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
}
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
- virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual Representation RepresentationFromUses() V8_OVERRIDE;
- virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
+ virtual Representation RepresentationFromUses() OVERRIDE;
+ virtual Representation RepresentationFromInputs() OVERRIDE;
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2636,7 +2629,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
return op_ == b->op();
}
@@ -2688,7 +2681,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
@@ -2697,12 +2690,12 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
};
-class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
+class HLoadRoot FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -2711,7 +2704,7 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HLoadRoot* b = HLoadRoot::cast(other);
return index_ == b->index_;
}
@@ -2725,13 +2718,13 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
SetDependsOnFlag(kCalls);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
const Heap::RootListIndex index_;
};
-class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
+class HCheckMaps FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, HValue* typecheck = NULL) {
@@ -2758,17 +2751,17 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
ClearDependsOnFlag(kMaps);
}
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
if (value()->type().IsHeapObject()) return value()->type();
return HType::HeapObject();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HValue* value() const { return OperandAt(0); }
HValue* typecheck() const { return OperandAt(1); }
@@ -2780,7 +2773,7 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
bool HasMigrationTarget() const { return has_migration_target_; }
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
static HCheckMaps* CreateAndInsertAfter(Zone* zone,
HValue* value,
@@ -2802,7 +2795,7 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return this->maps()->Equals(HCheckMaps::cast(other)->maps());
}
@@ -2850,7 +2843,7 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
};
-class HCheckValue V8_FINAL : public HUnaryOperation {
+class HCheckValue FINAL : public HUnaryOperation {
public:
static HCheckValue* New(Zone* zone, HValue* context,
HValue* value, Handle<JSFunction> func) {
@@ -2869,19 +2862,19 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
return new(zone) HCheckValue(value, target, object_in_new_space);
}
- virtual void FinalizeUniqueness() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() OVERRIDE {
object_ = Unique<HeapObject>(object_.handle());
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE;
+ virtual void Verify() OVERRIDE;
#endif
Unique<HeapObject> object() const { return object_; }
@@ -2890,7 +2883,7 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckValue)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HCheckValue* b = HCheckValue::cast(other);
return object_ == b->object_;
}
@@ -2910,7 +2903,7 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
};
-class HCheckInstanceType V8_FINAL : public HUnaryOperation {
+class HCheckInstanceType FINAL : public HUnaryOperation {
public:
enum Check {
IS_SPEC_OBJECT,
@@ -2922,13 +2915,13 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
switch (check_) {
case IS_SPEC_OBJECT: return HType::JSObject();
case IS_JS_ARRAY: return HType::JSArray();
@@ -2939,7 +2932,7 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
return HType::Tagged();
}
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -2953,7 +2946,7 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
// TODO(ager): It could be nice to allow the ommision of instance
// type checks if we have already performed an instance type check
// with a larger range.
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HCheckInstanceType* b = HCheckInstanceType::cast(other);
return check_ == b->check_;
}
@@ -2973,15 +2966,15 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
};
-class HCheckSmi V8_FINAL : public HUnaryOperation {
+class HCheckSmi FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual HValue* Canonicalize() V8_OVERRIDE {
+ virtual HValue* Canonicalize() OVERRIDE {
HType value_type = value()->type();
if (value_type.IsSmi()) {
return NULL;
@@ -2992,7 +2985,7 @@ class HCheckSmi V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
@@ -3002,32 +2995,32 @@ class HCheckSmi V8_FINAL : public HUnaryOperation {
};
-class HCheckHeapObject V8_FINAL : public HUnaryOperation {
+class HCheckHeapObject FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
if (value()->type().IsHeapObject()) return value()->type();
return HType::HeapObject();
}
#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE;
+ virtual void Verify() OVERRIDE;
#endif
- virtual HValue* Canonicalize() V8_OVERRIDE {
+ virtual HValue* Canonicalize() OVERRIDE {
return value()->type().IsHeapObject() ? NULL : this;
}
DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
@@ -3054,11 +3047,10 @@ struct InductionVariableLimitUpdate {
class HBoundsCheck;
class HPhi;
-class HConstant;
class HBitwise;
-class InductionVariableData V8_FINAL : public ZoneObject {
+class InductionVariableData FINAL : public ZoneObject {
public:
class InductionVariableCheck : public ZoneObject {
public:
@@ -3258,7 +3250,7 @@ class InductionVariableData V8_FINAL : public ZoneObject {
};
-class HPhi V8_FINAL : public HValue {
+class HPhi FINAL : public HValue {
public:
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
@@ -3274,20 +3266,20 @@ class HPhi V8_FINAL : public HValue {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() OVERRIDE;
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ HInferRepresentationPhase* h_infer) OVERRIDE;
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation();
}
- virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ virtual Representation KnownOptimalRepresentation() OVERRIDE {
return representation();
}
- virtual HType CalculateInferredType() V8_OVERRIDE;
- virtual int OperandCount() const V8_OVERRIDE { return inputs_.length(); }
- virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE;
+ virtual int OperandCount() const OVERRIDE { return inputs_.length(); }
+ virtual HValue* OperandAt(int index) const OVERRIDE {
return inputs_[index];
}
HValue* GetRedundantReplacement();
@@ -3297,7 +3289,7 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- virtual HSourcePosition position() const V8_OVERRIDE;
+ virtual HSourcePosition position() const OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3316,10 +3308,10 @@ class HPhi V8_FINAL : public HValue {
induction_variable_data_ = InductionVariableData::ExaminePhi(this);
}
- virtual OStream& PrintTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintTo(OStream& os) const OVERRIDE; // NOLINT
#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE;
+ virtual void Verify() OVERRIDE;
#endif
void InitRealUses(int id);
@@ -3356,7 +3348,7 @@ class HPhi V8_FINAL : public HValue {
DCHECK(value->IsPhi());
return reinterpret_cast<HPhi*>(value);
}
- virtual Opcode opcode() const V8_OVERRIDE { return HValue::kPhi; }
+ virtual Opcode opcode() const OVERRIDE { return HValue::kPhi; }
void SimplifyConstantInputs();
@@ -3364,8 +3356,8 @@ class HPhi V8_FINAL : public HValue {
static const int kInvalidMergedIndex = -1;
protected:
- virtual void DeleteFromGraph() V8_OVERRIDE;
- virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
+ virtual void DeleteFromGraph() OVERRIDE;
+ virtual void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
inputs_[index] = value;
}
@@ -3379,7 +3371,7 @@ class HPhi V8_FINAL : public HValue {
InductionVariableData* induction_variable_data_;
// TODO(titzer): we can't eliminate the receiver for generating backtraces
- virtual bool IsDeletable() const V8_OVERRIDE { return !IsReceiver(); }
+ virtual bool IsDeletable() const OVERRIDE { return !IsReceiver(); }
};
@@ -3388,24 +3380,24 @@ class HDematerializedObject : public HInstruction {
public:
HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
- virtual int OperandCount() const V8_FINAL V8_OVERRIDE {
+ virtual int OperandCount() const FINAL OVERRIDE {
return values_.length();
}
- virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ virtual HValue* OperandAt(int index) const FINAL OVERRIDE {
return values_[index];
}
- virtual bool HasEscapingOperandAt(int index) V8_FINAL V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) FINAL OVERRIDE {
return false;
}
virtual Representation RequiredInputRepresentation(
- int index) V8_FINAL V8_OVERRIDE {
+ int index) FINAL OVERRIDE {
return Representation::None();
}
protected:
virtual void InternalSetOperandAt(int index,
- HValue* value) V8_FINAL V8_OVERRIDE {
+ HValue* value) FINAL OVERRIDE {
values_[index] = value;
}
@@ -3414,7 +3406,7 @@ class HDematerializedObject : public HInstruction {
};
-class HArgumentsObject V8_FINAL : public HDematerializedObject {
+class HArgumentsObject FINAL : public HDematerializedObject {
public:
static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
return new(zone) HArgumentsObject(count, zone);
@@ -3441,7 +3433,7 @@ class HArgumentsObject V8_FINAL : public HDematerializedObject {
};
-class HCapturedObject V8_FINAL : public HDematerializedObject {
+class HCapturedObject FINAL : public HDematerializedObject {
public:
HCapturedObject(int length, int id, Zone* zone)
: HDematerializedObject(length, zone), capture_id_(id) {
@@ -3468,7 +3460,7 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
// Replay effects of this instruction on the given environment.
void ReplayEnvironment(HEnvironment* env);
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
@@ -3478,11 +3470,11 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
// Note that we cannot DCE captured objects as they are used to replay
// the environment. This method is here as an explicit reminder.
// TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
- virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return false; }
+ virtual bool IsDeletable() const FINAL OVERRIDE { return false; }
};
-class HConstant V8_FINAL : public HTemplateInstruction<0> {
+class HConstant FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
@@ -3499,6 +3491,14 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
zone, context, value, representation));
}
+ virtual Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
+ Handle<Object> object = object_.handle();
+ if (!object.is_null() && object->IsHeapObject()) {
+ return v8::internal::handle(HeapObject::cast(*object)->map());
+ }
+ return Handle<Map>();
+ }
+
static HConstant* CreateAndInsertBefore(Zone* zone,
HValue* context,
int32_t value,
@@ -3542,9 +3542,9 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool IsSpecialDouble() const {
return has_double_value_ &&
- (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
- FixedDoubleArray::is_the_hole_nan(double_value_) ||
- std::isnan(double_value_));
+ (bit_cast<int64_t>(double_value_) == bit_cast<int64_t>(-0.0) ||
+ FixedDoubleArray::is_the_hole_nan(double_value_) ||
+ std::isnan(double_value_));
}
bool NotInNewSpace() const {
@@ -3561,11 +3561,11 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return instance_type_ == MAP_TYPE;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ virtual Representation KnownOptimalRepresentation() OVERRIDE {
if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
if (HasInteger32Value()) return Representation::Integer32();
if (HasNumberValue()) return Representation::Double();
@@ -3573,8 +3573,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return Representation::Tagged();
}
- virtual bool EmitAtUses() V8_OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual bool EmitAtUses() OVERRIDE;
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
@@ -3593,7 +3593,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
return true;
}
- return object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
+ return object_.IsInitialized() &&
+ object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
}
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
@@ -3644,11 +3645,11 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_map_;
}
- virtual intptr_t Hashcode() V8_OVERRIDE {
+ virtual intptr_t Hashcode() OVERRIDE {
if (has_int32_value_) {
return static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
- return static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+ return static_cast<intptr_t>(bit_cast<int64_t>(double_value_));
} else if (has_external_reference_value_) {
return reinterpret_cast<intptr_t>(external_reference_value_.address());
} else {
@@ -3657,7 +3658,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
}
}
- virtual void FinalizeUniqueness() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() OVERRIDE {
if (!has_double_value_ && !has_external_reference_value_) {
DCHECK(!object_.handle().is_null());
object_ = Unique<Object>(object_.handle());
@@ -3672,15 +3673,15 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_.IsInitialized() && object_ == other;
}
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
return other_constant->has_int32_value_ &&
int32_value_ == other_constant->int32_value_;
} else if (has_double_value_) {
return other_constant->has_double_value_ &&
- BitCast<int64_t>(double_value_) ==
- BitCast<int64_t>(other_constant->double_value_);
+ bit_cast<int64_t>(double_value_) ==
+ bit_cast<int64_t>(other_constant->double_value_);
} else if (has_external_reference_value_) {
return other_constant->has_external_reference_value_ &&
external_reference_value_ ==
@@ -3697,13 +3698,13 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
}
#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE { }
+ virtual void Verify() OVERRIDE { }
#endif
DECLARE_CONCRETE_INSTRUCTION(Constant)
protected:
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
friend class HGraph;
@@ -3731,7 +3732,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
void Initialize(Representation r);
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
// If this is a numerical constant, object_ either points to the
// HeapObject the constant originated from or is null. If the
@@ -3816,30 +3817,30 @@ class HBinaryOperation : public HTemplateInstruction<3> {
observed_output_representation_ = observed;
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
if (index == 0) return Representation::Tagged();
return observed_input_representation_[index - 1];
}
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
? Representation::Integer32() : new_rep;
HValue::UpdateRepresentation(rep, h_infer, reason);
}
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+ HInferRepresentationPhase* h_infer) OVERRIDE;
+ virtual Representation RepresentationFromInputs() OVERRIDE;
Representation RepresentationFromOutput();
- virtual void AssumeRepresentation(Representation r) V8_OVERRIDE;
+ virtual void AssumeRepresentation(Representation r) OVERRIDE;
virtual bool IsCommutative() const { return false; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
if (index == 0) return Representation::Tagged();
return representation();
}
@@ -3854,7 +3855,10 @@ class HBinaryOperation : public HTemplateInstruction<3> {
bool RightIsPowerOf2() {
if (!right()->IsInteger32Constant()) return false;
int32_t value = right()->GetInteger32Constant();
- return IsPowerOf2(value) || IsPowerOf2(-value);
+ if (value < 0) {
+ return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(-value));
+ }
+ return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
}
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
@@ -3867,22 +3871,22 @@ class HBinaryOperation : public HTemplateInstruction<3> {
};
-class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
+class HWrapReceiver FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
HValue* receiver() const { return OperandAt(0); }
HValue* function() const { return OperandAt(1); }
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
bool known_function() const { return known_function_; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
@@ -3901,12 +3905,12 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
};
-class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
+class HApplyArguments FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// The length is untagged, all other inputs are tagged.
return (index == 2)
? Representation::Integer32()
@@ -3935,20 +3939,20 @@ class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
};
-class HArgumentsElements V8_FINAL : public HTemplateInstruction<0> {
+class HArgumentsElements FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
bool from_inlined() const { return from_inlined_; }
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
@@ -3958,24 +3962,24 @@ class HArgumentsElements V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
bool from_inlined_;
};
-class HArgumentsLength V8_FINAL : public HUnaryOperation {
+class HArgumentsLength FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
@@ -3983,17 +3987,17 @@ class HArgumentsLength V8_FINAL : public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
+class HAccessArgumentsAt FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// The arguments elements is considered tagged.
return index == 0
? Representation::Tagged()
@@ -4015,14 +4019,14 @@ class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(2, index);
}
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
};
class HBoundsCheckBaseIndexInformation;
-class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
+class HBoundsCheck FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
@@ -4051,21 +4055,21 @@ class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
}
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ HInferRepresentationPhase* h_infer) OVERRIDE;
HValue* index() const { return OperandAt(0); }
HValue* length() const { return OperandAt(1); }
bool allow_equality() const { return allow_equality_; }
void set_allow_equality(bool v) { allow_equality_ = v; }
- virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
- virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE {
+ virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() OVERRIDE {
return skip_check();
}
@@ -4074,9 +4078,9 @@ class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
protected:
friend class HBoundsCheckBaseIndexInformation;
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
bool skip_check_;
HValue* base_;
int offset_;
@@ -4098,13 +4102,13 @@ class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE {
+ virtual bool IsDeletable() const OVERRIDE {
return skip_check() && !FLAG_debug_code;
}
};
-class HBoundsCheckBaseIndexInformation V8_FINAL
+class HBoundsCheckBaseIndexInformation FINAL
: public HTemplateInstruction<2> {
public:
explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
@@ -4122,14 +4126,14 @@ class HBoundsCheckBaseIndexInformation V8_FINAL
DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
- virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE { return true; }
+ virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() OVERRIDE { return true; }
};
@@ -4144,7 +4148,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
SetAllSideEffects();
}
- virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ virtual void RepresentationChanged(Representation to) OVERRIDE {
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4158,13 +4162,13 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
// We only generate either int32 or generic tagged bitwise operations.
if (new_rep.IsDouble()) new_rep = Representation::Integer32();
HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
Representation r = HBinaryOperation::observed_input_representation(index);
if (r.IsDouble()) return Representation::Integer32();
return r;
@@ -4178,11 +4182,11 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
+class HMathFloorOfDiv FINAL : public HBinaryOperation {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
HValue*,
@@ -4191,7 +4195,7 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
@@ -4206,9 +4210,9 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
@@ -4221,7 +4225,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ virtual void RepresentationChanged(Representation to) OVERRIDE {
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4236,23 +4240,23 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
private:
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HCompareGeneric V8_FINAL : public HBinaryOperation {
+class HCompareGeneric FINAL : public HBinaryOperation {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, HValue*,
HValue*, Token::Value);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return index == 0
? Representation::Tagged()
: representation();
}
Token::Value token() const { return token_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
@@ -4291,18 +4295,18 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
}
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ HInferRepresentationPhase* h_infer) OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation();
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
return observed_input_representation_[index];
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
void SetOperandPositions(Zone* zone,
HSourcePosition left_pos,
@@ -4333,16 +4337,16 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
};
-class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HCompareHoleAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
HBasicBlock*, HBasicBlock*);
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ HInferRepresentationPhase* h_infer) OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation();
}
@@ -4359,18 +4363,18 @@ class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HCompareMinusZeroAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HCompareMinusZeroAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ HInferRepresentationPhase* h_infer) OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return representation();
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
@@ -4387,7 +4391,7 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
HBasicBlock*, HBasicBlock*);
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
static const int kNoKnownSuccessorIndex = -1;
int known_successor_index() const { return known_successor_index_; }
@@ -4398,13 +4402,13 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
HValue* left() const { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -4426,17 +4430,17 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
};
-class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsObjectAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
@@ -4448,17 +4452,17 @@ class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsStringAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
static const int kNoKnownSuccessorIndex = -1;
int known_successor_index() const { return known_successor_index_; }
@@ -4482,7 +4486,7 @@ class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsSmiAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
@@ -4490,12 +4494,12 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
virtual int RedefinedOperandIndex() { return 0; }
private:
@@ -4508,17 +4512,17 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsUndetectableAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
HBasicBlock*, HBasicBlock*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
@@ -4542,9 +4546,9 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
HValue* right() { return OperandAt(2); }
Token::Value token() const { return token_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -4576,7 +4580,7 @@ class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
public:
DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -4586,7 +4590,7 @@ class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
};
-class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HHasInstanceTypeAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(
HHasInstanceTypeAndBranch, HValue*, InstanceType);
@@ -4596,13 +4600,13 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
InstanceType from() { return from_; }
InstanceType to() { return to_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
@@ -4619,11 +4623,11 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HHasCachedArrayIndexAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -4634,18 +4638,18 @@ class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
+class HGetCachedArrayIndex FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
@@ -4653,22 +4657,22 @@ class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HClassOfTestAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
Handle<String>);
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
Handle<String> class_name() const { return class_name_; }
@@ -4681,22 +4685,22 @@ class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HTypeofIsAndBranch FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
Handle<String> type_literal() const { return type_literal_.handle(); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
- virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
- virtual void FinalizeUniqueness() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() OVERRIDE {
type_literal_ = Unique<String>(type_literal_.handle());
}
@@ -4709,15 +4713,15 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
};
-class HInstanceOf V8_FINAL : public HBinaryOperation {
+class HInstanceOf FINAL : public HBinaryOperation {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
@@ -4730,7 +4734,7 @@ class HInstanceOf V8_FINAL : public HBinaryOperation {
};
-class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
+class HInstanceOfKnownGlobal FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
HValue*,
@@ -4740,7 +4744,7 @@ class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -4761,7 +4765,7 @@ class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
};
-class HPower V8_FINAL : public HTemplateInstruction<2> {
+class HPower FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4771,19 +4775,19 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
HValue* left() { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return index == 0
? Representation::Double()
: Representation::None();
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
return RequiredInputRepresentation(index);
}
DECLARE_CONCRETE_INSTRUCTION(Power)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HPower(HValue* left, HValue* right) {
@@ -4794,13 +4798,13 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
SetChangesFlag(kNewSpacePromotion);
}
- virtual bool IsDeletable() const V8_OVERRIDE {
+ virtual bool IsDeletable() const OVERRIDE {
return !right()->representation().IsTagged();
}
};
-class HAdd V8_FINAL : public HArithmeticBinaryOperation {
+class HAdd FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4810,13 +4814,13 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
// We also do not commute (pointer + offset).
- virtual bool IsCommutative() const V8_OVERRIDE {
+ virtual bool IsCommutative() const OVERRIDE {
return !representation().IsTagged() && !representation().IsExternal();
}
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
- virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
if (left()->IsInteger32Constant()) {
decomposition->Apply(right(), left()->GetInteger32Constant());
return true;
@@ -4828,7 +4832,7 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
}
- virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ virtual void RepresentationChanged(Representation to) OVERRIDE {
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
@@ -4844,16 +4848,16 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
}
- virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE;
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HAdd(HValue* context, HValue* left, HValue* right)
@@ -4863,16 +4867,16 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
};
-class HSub V8_FINAL : public HArithmeticBinaryOperation {
+class HSub FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
- virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
if (right()->IsInteger32Constant()) {
decomposition->Apply(left(), -right()->GetInteger32Constant());
return true;
@@ -4884,9 +4888,9 @@ class HSub V8_FINAL : public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HSub(HValue* context, HValue* left, HValue* right)
@@ -4896,7 +4900,7 @@ class HSub V8_FINAL : public HArithmeticBinaryOperation {
};
-class HMul V8_FINAL : public HArithmeticBinaryOperation {
+class HMul FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4916,16 +4920,16 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
return mul;
}
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
// Only commutative if it is certain that not two objects are multiplicated.
- virtual bool IsCommutative() const V8_OVERRIDE {
+ virtual bool IsCommutative() const OVERRIDE {
return !representation().IsTagged();
}
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4934,9 +4938,9 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HMul(HValue* context, HValue* left, HValue* right)
@@ -4946,18 +4950,18 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
};
-class HMod V8_FINAL : public HArithmeticBinaryOperation {
+class HMod FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4965,9 +4969,9 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HMod(HValue* context,
@@ -4980,18 +4984,18 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
};
-class HDiv V8_FINAL : public HArithmeticBinaryOperation {
+class HDiv FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4999,9 +5003,9 @@ class HDiv V8_FINAL : public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HDiv(HValue* context, HValue* left, HValue* right)
@@ -5012,7 +5016,7 @@ class HDiv V8_FINAL : public HArithmeticBinaryOperation {
};
-class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
+class HMathMinMax FINAL : public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
@@ -5022,14 +5026,14 @@ class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
HValue* right,
Operation op);
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
return RequiredInputRepresentation(index);
}
virtual void InferRepresentation(
- HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ HInferRepresentationPhase* h_infer) OVERRIDE;
- virtual Representation RepresentationFromInputs() V8_OVERRIDE {
+ virtual Representation RepresentationFromInputs() OVERRIDE {
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
Representation result = Representation::Smi();
@@ -5039,19 +5043,19 @@ class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
return result;
}
- virtual bool IsCommutative() const V8_OVERRIDE { return true; }
+ virtual bool IsCommutative() const OVERRIDE { return true; }
Operation operation() { return operation_; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return other->IsMathMinMax() &&
HMathMinMax::cast(other)->operation_ == operation_;
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
@@ -5062,7 +5066,7 @@ class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
};
-class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
+class HBitwise FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -5072,20 +5076,20 @@ class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
Token::Value op() const { return op_; }
- virtual bool IsCommutative() const V8_OVERRIDE { return true; }
+ virtual bool IsCommutative() const OVERRIDE { return true; }
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return op() == HBitwise::cast(other)->op();
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
private:
HBitwise(HValue* context,
@@ -5124,18 +5128,18 @@ class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
};
-class HShl V8_FINAL : public HBitwiseBinaryOperation {
+class HShl FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
if (new_rep.IsSmi() &&
!(right()->IsInteger32Constant() &&
right()->GetInteger32Constant() >= 0)) {
@@ -5147,7 +5151,7 @@ class HShl V8_FINAL : public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HShl(HValue* context, HValue* left, HValue* right)
@@ -5155,14 +5159,14 @@ class HShl V8_FINAL : public HBitwiseBinaryOperation {
};
-class HShr V8_FINAL : public HBitwiseBinaryOperation {
+class HShr FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
if (right()->IsInteger32Constant()) {
if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
// This is intended to look for HAdd and HSub, to handle compounds
@@ -5174,11 +5178,11 @@ class HShr V8_FINAL : public HBitwiseBinaryOperation {
return false;
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5186,7 +5190,7 @@ class HShr V8_FINAL : public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HShr(HValue* context, HValue* left, HValue* right)
@@ -5194,14 +5198,14 @@ class HShr V8_FINAL : public HBitwiseBinaryOperation {
};
-class HSar V8_FINAL : public HBitwiseBinaryOperation {
+class HSar FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+ virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
if (right()->IsInteger32Constant()) {
if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
// This is intended to look for HAdd and HSub, to handle compounds
@@ -5213,11 +5217,11 @@ class HSar V8_FINAL : public HBitwiseBinaryOperation {
return false;
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5225,7 +5229,7 @@ class HSar V8_FINAL : public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HSar(HValue* context, HValue* left, HValue* right)
@@ -5233,7 +5237,7 @@ class HSar V8_FINAL : public HBitwiseBinaryOperation {
};
-class HRor V8_FINAL : public HBitwiseBinaryOperation {
+class HRor FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -5244,7 +5248,7 @@ class HRor V8_FINAL : public HBitwiseBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) V8_OVERRIDE {
+ const char* reason) OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5252,7 +5256,7 @@ class HRor V8_FINAL : public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Ror)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
HRor(HValue* context, HValue* left, HValue* right)
@@ -5262,13 +5266,13 @@ class HRor V8_FINAL : public HBitwiseBinaryOperation {
};
-class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
+class HOsrEntry FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
BailoutId ast_id() const { return ast_id_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -5284,7 +5288,7 @@ class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
};
-class HParameter V8_FINAL : public HTemplateInstruction<0> {
+class HParameter FINAL : public HTemplateInstruction<0> {
public:
enum ParameterKind {
STACK_PARAMETER,
@@ -5299,9 +5303,9 @@ class HParameter V8_FINAL : public HTemplateInstruction<0> {
unsigned index() const { return index_; }
ParameterKind kind() const { return kind_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -5328,14 +5332,14 @@ class HParameter V8_FINAL : public HTemplateInstruction<0> {
};
-class HCallStub V8_FINAL : public HUnaryCall {
+class HCallStub FINAL : public HUnaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
CodeStub::Major major_key() { return major_key_; }
HValue* context() { return value(); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(CallStub)
@@ -5349,13 +5353,44 @@ class HCallStub V8_FINAL : public HUnaryCall {
};
-class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
+class HTailCallThroughMegamorphicCache FINAL : public HTemplateInstruction<3> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HTailCallThroughMegamorphicCache,
+ HValue*, HValue*, Code::Flags);
+
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ HValue* context() const { return OperandAt(0); }
+ HValue* receiver() const { return OperandAt(1); }
+ HValue* name() const { return OperandAt(2); }
+ Code::Flags flags() const { return flags_; }
+
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache)
+
+ private:
+ HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
+ HValue* name, Code::Flags flags)
+ : flags_(flags) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, receiver);
+ SetOperandAt(2, name);
+ }
+
+ Code::Flags flags_;
+};
+
+
+class HUnknownOSRValue FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
virtual OStream& PrintDataTo(OStream& os) const; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
@@ -5364,7 +5399,7 @@ class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
HEnvironment *environment() { return environment_; }
int index() { return index_; }
- virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ virtual Representation KnownOptimalRepresentation() OVERRIDE {
if (incoming_value_ == NULL) return Representation::None();
return incoming_value_->KnownOptimalRepresentation();
}
@@ -5385,7 +5420,7 @@ class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
};
-class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
+class HLoadGlobalCell FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>,
PropertyDetails);
@@ -5393,24 +5428,24 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
Unique<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual intptr_t Hashcode() V8_OVERRIDE {
+ virtual intptr_t Hashcode() OVERRIDE {
return cell_.Hashcode();
}
- virtual void FinalizeUniqueness() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() OVERRIDE {
cell_ = Unique<Cell>(cell_.handle());
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return cell_ == HLoadGlobalCell::cast(other)->cell_;
}
@@ -5422,14 +5457,14 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
SetDependsOnFlag(kGlobalVars);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
Unique<Cell> cell_;
PropertyDetails details_;
};
-class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
+class HLoadGlobalGeneric FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
Handle<String>, bool);
@@ -5450,9 +5485,9 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
slot_ = slot;
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -5476,7 +5511,7 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
};
-class HAllocate V8_FINAL : public HTemplateInstruction<2> {
+class HAllocate FINAL : public HTemplateInstruction<2> {
public:
static bool CompatibleInstanceTypes(InstanceType type1,
InstanceType type2) {
@@ -5509,7 +5544,7 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
size_upper_bound_ = value;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -5517,7 +5552,7 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
}
}
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ virtual Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
return known_initial_map_;
}
@@ -5558,9 +5593,9 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
}
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) V8_OVERRIDE;
+ HValue* dominator) OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(Allocate)
@@ -5664,7 +5699,7 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
};
-class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
+class HStoreCodeEntry FINAL: public HTemplateInstruction<2> {
public:
static HStoreCodeEntry* New(Zone* zone,
HValue* context,
@@ -5690,7 +5725,7 @@ class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
};
-class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
+class HInnerAllocatedObject FINAL : public HTemplateInstruction<2> {
public:
static HInnerAllocatedObject* New(Zone* zone,
HValue* context,
@@ -5703,11 +5738,11 @@ class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
HValue* base_object() const { return OperandAt(0); }
HValue* offset() const { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return index == 0 ? Representation::Tagged() : Representation::Integer32();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
@@ -5787,27 +5822,25 @@ inline PointersToHereCheck PointersToHereCheckForObject(HValue* object,
}
-class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
+class HStoreGlobalCell FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
Handle<PropertyCell>, PropertyDetails);
Unique<PropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck() {
- return !details_.IsDontDelete() || details_.IsReadOnly();
- }
+ bool RequiresHoleCheck() { return details_.IsConfigurable(); }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
- virtual void FinalizeUniqueness() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() OVERRIDE {
cell_ = Unique<PropertyCell>(cell_.handle());
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
@@ -5826,7 +5859,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
};
-class HLoadContextSlot V8_FINAL : public HUnaryOperation {
+class HLoadContextSlot FINAL : public HUnaryOperation {
public:
enum Mode {
// Perform a normal load of the context slot without checking its value.
@@ -5859,29 +5892,29 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
return (slot_index() == b->slot_index());
}
private:
- virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
int slot_index_;
Mode mode_;
};
-class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
+class HStoreContextSlot FINAL : public HTemplateInstruction<2> {
public:
enum Mode {
// Perform a normal store to the context slot without checking its previous
@@ -5916,11 +5949,11 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
@@ -5939,7 +5972,7 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
// Represents an access to a portion of an object, such as the map pointer,
// array elements pointer, etc, but not accesses to array elements themselves.
-class HObjectAccess V8_FINAL {
+class HObjectAccess FINAL {
public:
inline bool IsInobject() const {
return portion() != kBackingStore && portion() != kExternalMemory;
@@ -6192,8 +6225,9 @@ class HObjectAccess V8_FINAL {
Representation representation = Representation::Tagged());
// Create an access to a resolved field (in-object or backing store).
- static HObjectAccess ForField(Handle<Map> map,
- LookupResult *lookup, Handle<String> name = Handle<String>::null());
+ static HObjectAccess ForField(Handle<Map> map, int index,
+ Representation representation,
+ Handle<String> name);
// Create an access for the payload of a Cell or JSGlobalPropertyCell.
static HObjectAccess ForCellPayload(Isolate* isolate);
@@ -6313,7 +6347,7 @@ class HObjectAccess V8_FINAL {
OStream& operator<<(OStream& os, const HObjectAccess& access);
-class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
+class HLoadNamedField FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*,
HValue*, HObjectAccess);
@@ -6333,19 +6367,19 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
const UniqueSet<Map>* maps() const { return maps_; }
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
- virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+ virtual bool HasOutOfBoundsAccess(int size) OVERRIDE {
return !access().IsInobject() || access().offset() >= size;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
}
return Representation::Tagged();
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
bool CanBeReplacedWith(HValue* other) const {
if (!CheckFlag(HValue::kCantBeReplaced)) return false;
@@ -6361,7 +6395,7 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HLoadNamedField* that = HLoadNamedField::cast(other);
if (!this->access_.Equals(that->access_)) return false;
if (this->maps_ == that->maps_) return true;
@@ -6425,14 +6459,14 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
access.SetGVNFlags(this, LOAD);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
HObjectAccess access_;
const UniqueSet<Map>* maps_;
};
-class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
+class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*,
Handle<Object>);
@@ -6453,11 +6487,11 @@ class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
slot_ = slot;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
@@ -6477,20 +6511,20 @@ class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
};
-class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
+class HLoadFunctionPrototype FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
HValue* function() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
private:
explicit HLoadFunctionPrototype(HValue* function)
@@ -6527,7 +6561,7 @@ enum LoadKeyedHoleMode {
};
-class HLoadKeyed V8_FINAL
+class HLoadKeyed FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
@@ -6561,14 +6595,14 @@ class HLoadKeyed V8_FINAL
void SetDehoisted(bool is_dehoisted) {
bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
}
- virtual ElementsKind elements_kind() const V8_OVERRIDE {
+ virtual ElementsKind elements_kind() const OVERRIDE {
return ElementsKindField::decode(bit_field_);
}
LoadKeyedHoleMode hole_mode() const {
return HoleModeField::decode(bit_field_);
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// kind_fast: tagged[int32] (none)
// kind_double: tagged[int32] (none)
// kind_fixed_typed_array: tagged[int32] (none)
@@ -6584,22 +6618,22 @@ class HLoadKeyed V8_FINAL
return Representation::None();
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
return RequiredInputRepresentation(index);
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
bool UsesMustHandleHole() const;
bool AllUsesCanTreatHoleAsNaN() const;
bool RequiresHoleCheck() const;
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual Range* InferRange(Zone* zone) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
@@ -6676,7 +6710,7 @@ class HLoadKeyed V8_FINAL
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const V8_OVERRIDE {
+ virtual bool IsDeletable() const OVERRIDE {
return !RequiresHoleCheck();
}
@@ -6712,7 +6746,7 @@ class HLoadKeyed V8_FINAL
};
-class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
+class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*,
HValue*);
@@ -6731,14 +6765,14 @@ class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
slot_ = slot;
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// tagged[tagged]
return Representation::Tagged();
}
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
@@ -6768,7 +6802,7 @@ enum StoreFieldOrKeyedMode {
};
-class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
+class HStoreNamedField FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
HObjectAccess, HValue*);
@@ -6777,13 +6811,13 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
- virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE {
+ virtual bool HasEscapingOperandAt(int index) OVERRIDE {
return index == 1;
}
- virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ virtual bool HasOutOfBoundsAccess(int size) OVERRIDE {
return !access().IsInobject() || access().offset() >= size;
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
@@ -6808,13 +6842,13 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
return Representation::Tagged();
}
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) V8_OVERRIDE {
+ HValue* dominator) OVERRIDE {
DCHECK(side_effect == kNewSpacePromotion);
if (!FLAG_use_write_barrier_elimination) return false;
dominator_ = dominator;
return false;
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
@@ -6913,7 +6947,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
};
-class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
+class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
Handle<String>, HValue*,
@@ -6924,9 +6958,9 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
Handle<String> name() const { return name_; }
StrictMode strict_mode() const { return strict_mode_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -6951,7 +6985,7 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
};
-class HStoreKeyed V8_FINAL
+class HStoreKeyed FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
@@ -6961,7 +6995,7 @@ class HStoreKeyed V8_FINAL
DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind, StoreFieldOrKeyedMode, int);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// kind_fast: tagged[int32] = tagged
// kind_double: tagged[int32] = double
// kind_smi : tagged[int32] = smi
@@ -7012,7 +7046,7 @@ class HStoreKeyed V8_FINAL
return is_external() || is_fixed_typed_array();
}
- virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+ virtual Representation observed_input_representation(int index) OVERRIDE {
if (index < 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
@@ -7047,7 +7081,7 @@ class HStoreKeyed V8_FINAL
}
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) V8_OVERRIDE {
+ HValue* dominator) OVERRIDE {
DCHECK(side_effect == kNewSpacePromotion);
dominator_ = dominator;
return false;
@@ -7070,7 +7104,7 @@ class HStoreKeyed V8_FINAL
bool NeedsCanonicalization();
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
@@ -7127,7 +7161,7 @@ class HStoreKeyed V8_FINAL
};
-class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
+class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
HValue*, HValue*, StrictMode);
@@ -7138,12 +7172,12 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
HValue* context() const { return OperandAt(3); }
StrictMode strict_mode() const { return strict_mode_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
// tagged[tagged] = tagged
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
@@ -7165,7 +7199,7 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
};
-class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
+class HTransitionElementsKind FINAL : public HTemplateInstruction<2> {
public:
inline static HTransitionElementsKind* New(Zone* zone,
HValue* context,
@@ -7176,7 +7210,7 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
original_map, transitioned_map);
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7187,12 +7221,12 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
ElementsKind from_kind() const { return from_kind_; }
ElementsKind to_kind() const { return to_kind_; }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
return original_map_ == instr->original_map_ &&
transitioned_map_ == instr->transitioned_map_;
@@ -7227,7 +7261,7 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
};
-class HStringAdd V8_FINAL : public HBinaryOperation {
+class HStringAdd FINAL : public HBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -7241,16 +7275,16 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
StringAddFlags flags() const { return flags_; }
PretenureFlag pretenure_flag() const { return pretenure_flag_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(StringAdd)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return flags_ == HStringAdd::cast(other)->flags_ &&
pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
}
@@ -7278,14 +7312,14 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
}
// No side-effects except possible allocation:
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
const StringAddFlags flags_;
const PretenureFlag pretenure_flag_;
};
-class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
+class HStringCharCodeAt FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
HValue*,
@@ -7305,9 +7339,9 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
+ virtual Range* InferRange(Zone* zone) OVERRIDE {
return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
@@ -7324,17 +7358,17 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
}
// No side effects: runtime function assumes string + number inputs.
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
+class HStringCharFromCode FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* char_code);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return index == 0
? Representation::Tagged()
: Representation::Integer32();
@@ -7343,7 +7377,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
HValue* context() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
@@ -7357,7 +7391,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
SetChangesFlag(kNewSpacePromotion);
}
- virtual bool IsDeletable() const V8_OVERRIDE {
+ virtual bool IsDeletable() const OVERRIDE {
return !value()->ToNumberCanBeObserved();
}
};
@@ -7384,7 +7418,7 @@ class HMaterializedLiteral : public HTemplateInstruction<V> {
}
private:
- virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const FINAL OVERRIDE { return true; }
int literal_index_;
int depth_;
@@ -7392,7 +7426,7 @@ class HMaterializedLiteral : public HTemplateInstruction<V> {
};
-class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
+class HRegExpLiteral FINAL : public HMaterializedLiteral<1> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
Handle<FixedArray>,
@@ -7405,7 +7439,7 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7432,14 +7466,14 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
};
-class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
+class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
Handle<SharedFunctionInfo>,
bool);
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7448,44 +7482,46 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
bool pretenure() const { return pretenure_; }
bool has_no_literals() const { return has_no_literals_; }
- bool is_generator() const { return is_generator_; }
+ bool is_arrow() const { return IsArrowFunction(kind_); }
+ bool is_generator() const { return IsGeneratorFunction(kind_); }
+ bool is_concise_method() const { return IsConciseMethod(kind_); }
+ FunctionKind kind() const { return kind_; }
StrictMode strict_mode() const { return strict_mode_; }
private:
- HFunctionLiteral(HValue* context,
- Handle<SharedFunctionInfo> shared,
+ HFunctionLiteral(HValue* context, Handle<SharedFunctionInfo> shared,
bool pretenure)
: HTemplateInstruction<1>(HType::JSObject()),
shared_info_(shared),
+ kind_(shared->kind()),
pretenure_(pretenure),
has_no_literals_(shared->num_literals() == 0),
- is_generator_(shared->is_generator()),
strict_mode_(shared->strict_mode()) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
SetChangesFlag(kNewSpacePromotion);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
Handle<SharedFunctionInfo> shared_info_;
+ FunctionKind kind_;
bool pretenure_ : 1;
bool has_no_literals_ : 1;
- bool is_generator_ : 1;
StrictMode strict_mode_;
};
-class HTypeof V8_FINAL : public HTemplateInstruction<2> {
+class HTypeof FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
HValue* context() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7498,15 +7534,15 @@ class HTypeof V8_FINAL : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HTrapAllocationMemento V8_FINAL : public HTemplateInstruction<1> {
+class HTrapAllocationMemento FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7521,11 +7557,11 @@ class HTrapAllocationMemento V8_FINAL : public HTemplateInstruction<1> {
};
-class HToFastProperties V8_FINAL : public HUnaryOperation {
+class HToFastProperties FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7546,17 +7582,17 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
#endif
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
-class HDateField V8_FINAL : public HUnaryOperation {
+class HDateField FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
Smi* index() const { return index_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7572,7 +7608,7 @@ class HDateField V8_FINAL : public HUnaryOperation {
};
-class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
+class HSeqStringGetChar FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -7580,7 +7616,7 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
HValue* string,
HValue* index);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return (index == 0) ? Representation::Tagged()
: Representation::Integer32();
}
@@ -7592,11 +7628,11 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return encoding() == HSeqStringGetChar::cast(other)->encoding();
}
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
+ virtual Range* InferRange(Zone* zone) OVERRIDE {
if (encoding() == String::ONE_BYTE_ENCODING) {
return new(zone) Range(0, String::kMaxOneByteCharCode);
} else {
@@ -7616,13 +7652,13 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
SetDependsOnFlag(kStringChars);
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
String::Encoding encoding_;
};
-class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
+class HSeqStringSetChar FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(
HSeqStringSetChar, String::Encoding,
@@ -7634,7 +7670,7 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
HValue* index() { return OperandAt(2); }
HValue* value() { return OperandAt(3); }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return (index <= 1) ? Representation::Tagged()
: Representation::Integer32();
}
@@ -7659,17 +7695,17 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
};
-class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
+class HCheckMapValue FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
if (value()->type().IsHeapObject()) return value()->type();
return HType::HeapObject();
}
@@ -7677,14 +7713,14 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
HValue* value() const { return OperandAt(0); }
HValue* map() const { return OperandAt(1); }
- virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual HValue* Canonicalize() OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
protected:
virtual int RedefinedOperandIndex() { return 0; }
- virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ virtual bool DataEquals(HValue* other) OVERRIDE {
return true;
}
@@ -7701,20 +7737,20 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
};
-class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
+class HForInPrepareMap FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
HValue* context() const { return OperandAt(0); }
HValue* enumerable() const { return OperandAt(1); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
return HType::Tagged();
}
@@ -7731,11 +7767,11 @@ class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
};
-class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
+class HForInCacheArray FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}
@@ -7751,9 +7787,9 @@ class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
index_cache_ = index_cache;
}
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
return HType::Tagged();
}
@@ -7773,7 +7809,7 @@ class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
};
-class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
+class HLoadFieldByIndex FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*);
@@ -7785,7 +7821,7 @@ class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
if (index == 1) {
return Representation::Smi();
} else {
@@ -7796,16 +7832,16 @@ class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
HValue* object() const { return OperandAt(0); }
HValue* index() const { return OperandAt(1); }
- virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE; // NOLINT
- virtual HType CalculateInferredType() V8_OVERRIDE {
+ virtual HType CalculateInferredType() OVERRIDE {
return HType::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
private:
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ virtual bool IsDeletable() const OVERRIDE { return true; }
};
diff --git a/deps/v8/src/hydrogen-removable-simulates.cc b/deps/v8/src/hydrogen-removable-simulates.cc
index a28021deb8..73d7a8e058 100644
--- a/deps/v8/src/hydrogen-removable-simulates.cc
+++ b/deps/v8/src/hydrogen-removable-simulates.cc
@@ -53,6 +53,13 @@ class State : public ZoneObject {
FlushSimulates();
return this;
}
+ if (instr->IsCapturedObject()) {
+ // Do not merge simulates across captured objects - captured objects
+ // change environments during environment replay, and such changes
+ // would not be reflected in the simulate.
+ FlushSimulates();
+ return this;
+ }
// Skip the non-simulates and the first simulate.
if (!instr->IsSimulate()) return this;
if (first_) {
diff --git a/deps/v8/src/hydrogen-types.cc b/deps/v8/src/hydrogen-types.cc
index c83ff3cf89..87047a2552 100644
--- a/deps/v8/src/hydrogen-types.cc
+++ b/deps/v8/src/hydrogen-types.cc
@@ -42,7 +42,10 @@ HType HType::FromType<HeapType>(Handle<HeapType> type);
HType HType::FromValue(Handle<Object> value) {
if (value->IsSmi()) return HType::Smi();
if (value->IsNull()) return HType::Null();
- if (value->IsHeapNumber()) return HType::HeapNumber();
+ if (value->IsHeapNumber()) {
+ double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
+ return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
+ }
if (value->IsString()) return HType::String();
if (value->IsBoolean()) return HType::Boolean();
if (value->IsUndefined()) return HType::Undefined();
diff --git a/deps/v8/src/hydrogen-types.h b/deps/v8/src/hydrogen-types.h
index d662a167b9..a42cba578e 100644
--- a/deps/v8/src/hydrogen-types.h
+++ b/deps/v8/src/hydrogen-types.h
@@ -34,36 +34,36 @@ class OStream;
V(JSArray, 0x621) /* 0000 0110 0010 0001 */ \
V(None, 0x7ff) /* 0000 0111 1111 1111 */
-class HType V8_FINAL {
+class HType FINAL {
public:
#define DECLARE_CONSTRUCTOR(Name, mask) \
- static HType Name() V8_WARN_UNUSED_RESULT { return HType(k##Name); }
+ static HType Name() WARN_UNUSED_RESULT { return HType(k##Name); }
HTYPE_LIST(DECLARE_CONSTRUCTOR)
#undef DECLARE_CONSTRUCTOR
// Return the weakest (least precise) common type.
- HType Combine(HType other) const V8_WARN_UNUSED_RESULT {
+ HType Combine(HType other) const WARN_UNUSED_RESULT {
return HType(static_cast<Kind>(kind_ & other.kind_));
}
- bool Equals(HType other) const V8_WARN_UNUSED_RESULT {
+ bool Equals(HType other) const WARN_UNUSED_RESULT {
return kind_ == other.kind_;
}
- bool IsSubtypeOf(HType other) const V8_WARN_UNUSED_RESULT {
+ bool IsSubtypeOf(HType other) const WARN_UNUSED_RESULT {
return Combine(other).Equals(other);
}
#define DECLARE_IS_TYPE(Name, mask) \
- bool Is##Name() const V8_WARN_UNUSED_RESULT { \
+ bool Is##Name() const WARN_UNUSED_RESULT { \
return IsSubtypeOf(HType::Name()); \
}
HTYPE_LIST(DECLARE_IS_TYPE)
#undef DECLARE_IS_TYPE
template <class T>
- static HType FromType(typename T::TypeHandle type) V8_WARN_UNUSED_RESULT;
- static HType FromValue(Handle<Object> value) V8_WARN_UNUSED_RESULT;
+ static HType FromType(typename T::TypeHandle type) WARN_UNUSED_RESULT;
+ static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
friend OStream& operator<<(OStream& os, const HType& t);
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 63174aa5db..301e7e40fa 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -34,12 +34,15 @@
#include "src/hydrogen-sce.h"
#include "src/hydrogen-store-elimination.h"
#include "src/hydrogen-uint32-analysis.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/ic.h"
+// GetRootConstructor
+#include "src/ic/ic-inl.h"
#include "src/lithium-allocator.h"
#include "src/parser.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/typing.h"
#if V8_TARGET_ARCH_IA32
@@ -1853,9 +1856,11 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HAllocate* elements = BuildAllocateElements(elements_kind, size);
BuildInitializeElementsHeader(elements, elements_kind, length);
- HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
- elements_kind, max_length->Integer32Value());
- elements->set_size_upper_bound(size_in_bytes_upper_bound);
+ if (!elements->has_size_upper_bound()) {
+ HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
+ elements_kind, max_length->Integer32Value());
+ elements->set_size_upper_bound(size_in_bytes_upper_bound);
+ }
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
@@ -2063,11 +2068,11 @@ HValue* HGraphBuilder::BuildCreateConsString(
HInstruction* right_instance_type = AddLoadStringInstanceType(right);
// Allocate the cons string object. HAllocate does not care whether we
- // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use
+ // pass CONS_STRING_TYPE or CONS_ONE_BYTE_STRING_TYPE here, so we just use
// CONS_STRING_TYPE here. Below we decide whether the cons string is
// one-byte or two-byte and set the appropriate map.
DCHECK(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
- CONS_ASCII_STRING_TYPE));
+ CONS_ONE_BYTE_STRING_TYPE));
HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
HType::String(), CONS_STRING_TYPE,
allocation_mode);
@@ -2112,7 +2117,7 @@ HValue* HGraphBuilder::BuildCreateConsString(
// We can safely skip the write barrier for storing the map here.
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
- Add<HConstant>(isolate()->factory()->cons_ascii_string_map()));
+ Add<HConstant>(isolate()->factory()->cons_one_byte_string_map()));
}
if_onebyte.Else();
{
@@ -2240,8 +2245,8 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
{
HConstant* string_map =
Add<HConstant>(isolate()->factory()->string_map());
- HConstant* ascii_string_map =
- Add<HConstant>(isolate()->factory()->ascii_string_map());
+ HConstant* one_byte_string_map =
+ Add<HConstant>(isolate()->factory()->one_byte_string_map());
// Determine map and size depending on whether result is one-byte string.
IfBuilder if_onebyte(this);
@@ -2255,7 +2260,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
{
// Allocate sequential one-byte string object.
Push(length);
- Push(ascii_string_map);
+ Push(one_byte_string_map);
}
if_onebyte.Else();
{
@@ -2275,7 +2280,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
// Allocate the string object. HAllocate does not care whether we pass
- // STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here.
+ // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
HAllocate* result = BuildAllocate(
size, HType::String(), STRING_TYPE, allocation_mode);
Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
@@ -3321,7 +3326,6 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
break_scope_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- inline_bailout_(false),
osr_(new(info->zone()) HOsrBuilder(this)) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
@@ -3430,10 +3434,10 @@ HGraph::HGraph(CompilationInfo* info)
next_inline_id_(0),
inlined_functions_(5, info->zone()) {
if (info->IsStub()) {
- HydrogenCodeStub* stub = info->code_stub();
- CodeStubInterfaceDescriptor* descriptor = stub->GetInterfaceDescriptor();
- start_environment_ = new(zone_) HEnvironment(
- zone_, descriptor->GetEnvironmentParameterCount());
+ CallInterfaceDescriptor descriptor =
+ info->code_stub()->GetCallInterfaceDescriptor();
+ start_environment_ = new (zone_)
+ HEnvironment(zone_, descriptor.GetEnvironmentParameterCount());
} else {
TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
@@ -4206,7 +4210,7 @@ void TestContext::BuildBranch(HValue* value) {
void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
- current_info()->set_bailout_reason(reason);
+ current_info()->AbortOptimization(reason);
SetStackOverflow();
}
@@ -4506,6 +4510,11 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
}
+Type* HOptimizedGraphBuilder::ToType(Handle<Map> map) {
+ return IC::MapToType<Type>(map, zone());
+}
+
+
void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
Statement* stmt = statements->at(i);
@@ -4824,14 +4833,9 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- // We only optimize switch statements with a bounded number of clauses.
- const int kCaseClauseLimit = 128;
ZoneList<CaseClause*>* clauses = stmt->cases();
int clause_count = clauses->length();
ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
- if (clause_count > kCaseClauseLimit) {
- return Bailout(kSwitchStatementTooManyClauses);
- }
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
@@ -5238,6 +5242,14 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
}
+void HOptimizedGraphBuilder::VisitClassLiteral(ClassLiteral* lit) {
+ DCHECK(!HasStackOverflow());
+ DCHECK(current_block() != NULL);
+ DCHECK(current_block()->HasPredecessor());
+ return Bailout(kClassLiteral);
+}
+
+
void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
DCHECK(!HasStackOverflow());
@@ -5286,20 +5298,27 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
- HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
+HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
+ PropertyAccessType access_type) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
- Handle<GlobalObject> global(current_info()->global_object());
- global->Lookup(var->name(), lookup);
- if (!lookup->IsNormal() ||
- (access_type == STORE && lookup->IsReadOnly()) ||
- lookup->holder() != *global) {
- return kUseGeneric;
- }
- return kUseCell;
+ switch (it->state()) {
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::NOT_FOUND:
+ return kUseGeneric;
+ case LookupIterator::DATA:
+ if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
+ return kUseCell;
+ case LookupIterator::JSPROXY:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return kUseGeneric;
}
@@ -5340,17 +5359,13 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
- LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
-
- if (type == kUseCell &&
- current_info()->global_object()->IsAccessCheckNeeded()) {
- type = kUseGeneric;
- }
+ Handle<GlobalObject> global(current_info()->global_object());
+ LookupIterator it(global, variable->name(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ GlobalPropertyAccess type = LookupGlobalProperty(variable, &it, LOAD);
if (type == kUseCell) {
- Handle<GlobalObject> global(current_info()->global_object());
- Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+ Handle<PropertyCell> cell = it.GetPropertyCell();
if (cell->type()->IsConstant()) {
PropertyCell::AddDependentCompilationInfo(cell, top_info());
Handle<Object> constant_object = cell->type()->AsConstant()->Value();
@@ -5362,7 +5377,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(constant, expr->id());
} else {
HLoadGlobalCell* instr =
- New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails());
+ New<HLoadGlobalCell>(cell, it.property_details());
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
@@ -5603,7 +5618,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
PropertyAccessInfo info(this, STORE, ToType(map), name);
if (info.CanAccessMonomorphic()) {
HValue* checked_literal = Add<HCheckMaps>(literal, map);
- DCHECK(!info.lookup()->IsPropertyCallbacks());
+ DCHECK(!info.IsAccessor());
store = BuildMonomorphicAccess(
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
@@ -5790,19 +5805,17 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
PropertyAccessInfo* info,
HValue* checked_object) {
// See if this is a load for an immutable property
- if (checked_object->ActualValue()->IsConstant() &&
- info->lookup()->IsCacheable() &&
- info->lookup()->IsReadOnly() && info->lookup()->IsDontDelete()) {
+ if (checked_object->ActualValue()->IsConstant()) {
Handle<Object> object(
HConstant::cast(checked_object->ActualValue())->handle(isolate()));
if (object->IsJSObject()) {
- LookupResult lookup(isolate());
- Handle<JSObject>::cast(object)->Lookup(info->name(), &lookup);
- Handle<Object> value(lookup.GetLazyValue(), isolate());
-
- DCHECK(!value->IsTheHole());
- return New<HConstant>(value);
+ LookupIterator it(object, info->name(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Object> value = JSObject::GetDataProperty(&it);
+ if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
+ return New<HConstant>(value);
+ }
}
}
@@ -5834,7 +5847,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
PropertyAccessInfo* info,
HValue* checked_object,
HValue* value) {
- bool transition_to_field = info->lookup()->IsTransition();
+ bool transition_to_field = info->IsTransition();
// TODO(verwaest): Move this logic into PropertyAccessInfo.
HObjectAccess field_access = info->access();
@@ -5911,26 +5924,26 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
if (!LookupDescriptor()) return false;
- if (!lookup_.IsFound()) {
- return (!info->lookup_.IsFound() || info->has_holder()) &&
- map()->prototype() == info->map()->prototype();
+ if (!IsFound()) {
+ return (!info->IsFound() || info->has_holder()) &&
+ map()->prototype() == info->map()->prototype();
}
// Mismatch if the other access info found the property in the prototype
// chain.
if (info->has_holder()) return false;
- if (lookup_.IsPropertyCallbacks()) {
+ if (IsAccessor()) {
return accessor_.is_identical_to(info->accessor_) &&
api_holder_.is_identical_to(info->api_holder_);
}
- if (lookup_.IsConstant()) {
+ if (IsConstant()) {
return constant_.is_identical_to(info->constant_);
}
- DCHECK(lookup_.IsField());
- if (!info->lookup_.IsField()) return false;
+ DCHECK(IsField());
+ if (!info->IsField()) return false;
Representation r = access_.representation();
if (IsLoad()) {
@@ -5973,23 +5986,23 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
- if (!IsLoad() && lookup_.IsProperty() &&
- (lookup_.IsReadOnly() || !lookup_.IsCacheable())) {
+ if (!IsLoad() && IsProperty() && IsReadOnly()) {
return false;
}
- if (lookup_.IsField()) {
+ if (IsField()) {
// Construct the object field access.
- access_ = HObjectAccess::ForField(map, &lookup_, name_);
+ int index = GetLocalFieldIndexFromMap(map);
+ access_ = HObjectAccess::ForField(map, index, representation(), name_);
// Load field map for heap objects.
LoadFieldMaps(map);
- } else if (lookup_.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
- if (!callback->IsAccessorPair()) return false;
- Object* raw_accessor = IsLoad()
- ? Handle<AccessorPair>::cast(callback)->getter()
- : Handle<AccessorPair>::cast(callback)->setter();
+ } else if (IsAccessor()) {
+ Handle<Object> accessors = GetAccessorsFromMap(map);
+ if (!accessors->IsAccessorPair()) return false;
+ Object* raw_accessor =
+ IsLoad() ? Handle<AccessorPair>::cast(accessors)->getter()
+ : Handle<AccessorPair>::cast(accessors)->setter();
if (!raw_accessor->IsJSFunction()) return false;
Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
if (accessor->shared()->IsApiFunction()) {
@@ -6002,8 +6015,8 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
}
}
accessor_ = accessor;
- } else if (lookup_.IsConstant()) {
- constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
+ } else if (IsConstant()) {
+ constant_ = GetConstantFromMap(map);
}
return true;
@@ -6017,7 +6030,7 @@ void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
field_type_ = HType::Tagged();
// Figure out the field type from the accessor map.
- Handle<HeapType> field_type(lookup_.GetFieldTypeFromMap(*map), isolate());
+ Handle<HeapType> field_type = GetFieldTypeFromMap(map);
// Collect the (stable) maps from the field type.
int num_field_maps = field_type->NumClasses();
@@ -6042,9 +6055,8 @@ void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
DCHECK(field_type_.IsHeapObject());
// Add dependency on the map that introduced the field.
- Map::AddDependentCompilationInfo(
- handle(lookup_.GetFieldOwnerFromMap(*map), isolate()),
- DependentCode::kFieldTypeGroup, top_info());
+ Map::AddDependentCompilationInfo(GetFieldOwnerFromMap(map),
+ DependentCode::kFieldTypeGroup, top_info());
}
@@ -6062,7 +6074,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
return false;
}
map->LookupDescriptor(*holder_, *name_, &lookup_);
- if (lookup_.IsFound()) return LoadResult(map);
+ if (IsFound()) return LoadResult(map);
}
lookup_.NotFound();
return true;
@@ -6078,19 +6090,23 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
return IsLoad();
}
if (!LookupDescriptor()) return false;
- if (lookup_.IsFound()) {
- if (IsLoad()) return true;
- return !lookup_.IsReadOnly() && lookup_.IsCacheable();
- }
+ if (IsFound()) return IsLoad() || !IsReadOnly();
if (!LookupInPrototypes()) return false;
if (IsLoad()) return true;
- if (lookup_.IsPropertyCallbacks()) return true;
+ if (IsAccessor()) return true;
Handle<Map> map = this->map();
map->LookupTransition(NULL, *name_, &lookup_);
if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) {
// Construct the object field access.
- access_ = HObjectAccess::ForField(map, &lookup_, name_);
+ int descriptor = transition()->LastAdded();
+ int index =
+ transition()->instance_descriptors()->GetFieldIndex(descriptor) -
+ map->inobject_properties();
+ PropertyDetails details =
+ transition()->instance_descriptors()->GetDetails(descriptor);
+ Representation representation = details.representation();
+ access_ = HObjectAccess::ForField(map, index, representation, name_);
// Load field map for heap objects.
LoadFieldMaps(transition());
@@ -6125,8 +6141,8 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
if (type_->Is(Type::Number())) return false;
// Multiple maps cannot transition to the same target map.
- DCHECK(!IsLoad() || !lookup_.IsTransition());
- if (lookup_.IsTransition() && types->length() > 1) return false;
+ DCHECK(!IsLoad() || !IsTransition());
+ if (IsTransition() && types->length() > 1) return false;
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
@@ -6180,12 +6196,12 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
}
- if (!info->lookup()->IsFound()) {
+ if (!info->IsFound()) {
DCHECK(info->IsLoad());
return graph()->GetConstantUndefined();
}
- if (info->lookup()->IsField()) {
+ if (info->IsField()) {
if (info->IsLoad()) {
return BuildLoadNamedField(info, checked_holder);
} else {
@@ -6193,12 +6209,12 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
}
}
- if (info->lookup()->IsTransition()) {
+ if (info->IsTransition()) {
DCHECK(!info->IsLoad());
return BuildStoreNamedField(info, checked_object, value);
}
- if (info->lookup()->IsPropertyCallbacks()) {
+ if (info->IsAccessor()) {
Push(checked_object);
int argument_count = 1;
if (!info->IsLoad()) {
@@ -6222,7 +6238,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
return BuildCallConstantFunction(info->accessor(), argument_count);
}
- DCHECK(info->lookup()->IsConstant());
+ DCHECK(info->IsConstant());
if (info->IsLoad()) {
return New<HConstant>(info->constant());
} else {
@@ -6248,7 +6264,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
bool handle_smi = false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
- for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+ int i;
+ for (i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
@@ -6263,7 +6280,12 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
}
}
- count = 0;
+ if (i < types->length()) {
+ count = -1;
+ types->Clear();
+ } else {
+ count = 0;
+ }
HControlInstruction* smi_check = NULL;
handled_string = false;
@@ -6384,8 +6406,8 @@ static bool ComputeReceiverTypes(Expression* expr,
types->FilterForPossibleTransitions(root_map);
monomorphic = types->length() == 1;
}
- return monomorphic && CanInlinePropertyAccess(
- IC::MapToType<Type>(types->first(), zone));
+ return monomorphic &&
+ CanInlinePropertyAccess(IC::MapToType<Type>(types->first(), zone));
}
@@ -6408,8 +6430,8 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr,
- STORE, &has_side_effects);
+ HandleKeyedElementAccess(object, key, value, expr, ast_id, return_id, STORE,
+ &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
@@ -6458,11 +6480,11 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Variable* var,
HValue* value,
BailoutId ast_id) {
- LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
+ Handle<GlobalObject> global(current_info()->global_object());
+ LookupIterator it(global, var->name(), LookupIterator::OWN_SKIP_INTERCEPTOR);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
if (type == kUseCell) {
- Handle<GlobalObject> global(current_info()->global_object());
- Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+ Handle<PropertyCell> cell = it.GetPropertyCell();
if (cell->type()->IsConstant()) {
Handle<Object> constant = cell->type()->AsConstant()->Value();
if (value->IsConstant()) {
@@ -6487,7 +6509,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
}
HInstruction* instr =
- Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
+ Add<HStoreGlobalCell>(value, cell, it.property_details());
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
@@ -6744,10 +6766,12 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- // We don't optimize functions with invalid left-hand sides in
- // assignments, count operations, or for-in. Consequently throw can
- // currently only occur in an effect context.
- DCHECK(ast_context()->IsEffect());
+ if (!ast_context()->IsEffect()) {
+ // The parser turns invalid left-hand sides in assignments into throw
+ // statements, which may not be in effect contexts. We might still try
+ // to optimize such functions; bail out now if we do.
+ return Bailout(kInvalidLeftHandSideInAssignment);
+ }
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
@@ -7099,12 +7123,32 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
- HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- PropertyAccessType access_type,
+ HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
+ BailoutId return_id, PropertyAccessType access_type,
bool* has_side_effects) {
+ if (key->ActualValue()->IsConstant()) {
+ Handle<Object> constant =
+ HConstant::cast(key->ActualValue())->handle(isolate());
+ uint32_t array_index;
+ if (constant->IsString() &&
+ !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) {
+ if (!constant->IsUniqueName()) {
+ constant = isolate()->factory()->InternalizeString(
+ Handle<String>::cast(constant));
+ }
+ HInstruction* instr =
+ BuildNamedAccess(access_type, ast_id, return_id, expr, obj,
+ Handle<String>::cast(constant), val, false);
+ if (instr == NULL || instr->IsLinked()) {
+ *has_side_effects = false;
+ } else {
+ AddInstruction(instr);
+ *has_side_effects = instr->HasObservableSideEffects();
+ }
+ return instr;
+ }
+ }
+
DCHECK(!expr->IsPropertyName());
HInstruction* instr = NULL;
@@ -7201,7 +7245,9 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
+ if (!String::Equals(name, isolate()->factory()->length_string())) {
+ return false;
+ }
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
@@ -7313,7 +7359,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, LOAD, &has_side_effects);
+ obj, key, NULL, expr, ast_id, expr->LoadId(), LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7323,6 +7369,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
Drop(1);
}
}
+ if (load == NULL) return;
return ast_context()->ReturnValue(load);
}
return ast_context()->ReturnInstruction(instr, ast_id);
@@ -7390,9 +7437,7 @@ HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(
HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
HValue* fun, HValue* context,
int argument_count, HValue* expected_param_count) {
- CallInterfaceDescriptor* descriptor =
- isolate()->call_descriptor(Isolate::ArgumentAdaptorCall);
-
+ ArgumentAdaptorDescriptor descriptor(isolate());
HValue* arity = Add<HConstant>(argument_count - 1);
HValue* op_vals[] = { context, fun, arity, expected_param_count };
@@ -7403,7 +7448,7 @@ HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
return New<HCallWithDescriptor>(
adaptor_value, argument_count, descriptor,
- Vector<HValue*>(op_vals, descriptor->GetEnvironmentLength()));
+ Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
}
@@ -7472,12 +7517,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
bool handled_string = false;
int ordered_functions = 0;
- for (int i = 0;
- i < types->length() && ordered_functions < kMaxCallPolymorphism;
+ int i;
+ for (i = 0; i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
- if (info.CanAccessMonomorphic() &&
- info.lookup()->IsConstant() &&
+ if (info.CanAccessMonomorphic() && info.IsConstant() &&
info.constant()->IsJSFunction()) {
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
@@ -7495,6 +7539,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
std::sort(order, order + ordered_functions);
+ if (i < types->length()) {
+ types->Clear();
+ ordered_functions = -1;
+ }
+
HBasicBlock* number_block = NULL;
HBasicBlock* join = NULL;
handled_string = false;
@@ -7812,26 +7861,9 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Generate the deoptimization data for the unoptimized version of
// the target function if we don't already have it.
- if (!target_shared->has_deoptimization_support()) {
- // Note that we compile here using the same AST that we will use for
- // generating the optimized inline code.
- target_info.EnableDeoptimizationSupport();
- if (!FullCodeGenerator::MakeCode(&target_info)) {
- TraceInline(target, caller, "could not generate deoptimization info");
- return false;
- }
- if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) {
- // The scope info might not have been set if a lazily compiled
- // function is inlined before being called for the first time.
- Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(target_info.scope(), zone());
- target_shared->set_scope_info(*target_scope_info);
- }
- target_shared->EnableDeoptimizationSupport(*target_info.code());
- target_shared->set_feedback_vector(*target_info.feedback_vector());
- Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
- &target_info,
- target_shared);
+ if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
+ TraceInline(target, caller, "could not generate deoptimization info");
+ return false;
}
// ----------------------------------------------------------------
@@ -7888,10 +7920,9 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
Scope* saved_scope = scope();
set_scope(target_info.scope());
HEnterInlined* enter_inlined =
- Add<HEnterInlined>(return_id, target, arguments_count, function,
+ Add<HEnterInlined>(return_id, target, context, arguments_count, function,
function_state()->inlining_kind(),
- function->scope()->arguments(),
- arguments_object);
+ function->scope()->arguments(), arguments_object);
function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
@@ -7899,10 +7930,10 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
set_scope(saved_scope);
if (HasStackOverflow()) {
// Bail out if the inline function did, as we cannot residualize a call
- // instead.
+ // instead, but do not disable optimization for the outer function.
TraceInline(target, caller, "inline graph construction failed");
target_shared->DisableOptimization(kInliningBailedOut);
- inline_bailout_ = true;
+ current_info()->RetryOptimization(kInliningBailedOut);
delete target_state;
return true;
}
@@ -8218,7 +8249,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
ElementsKind elements_kind = receiver_map->elements_kind();
if (!IsFastElementsKind(elements_kind)) return false;
if (receiver_map->is_observed()) return false;
- DCHECK(receiver_map->is_extensible());
+ if (!receiver_map->is_extensible()) return false;
Drop(expr->arguments()->length());
HValue* result;
@@ -8283,7 +8314,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (!IsFastElementsKind(elements_kind)) return false;
if (receiver_map->is_observed()) return false;
if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
- DCHECK(receiver_map->is_extensible());
+ if (!receiver_map->is_extensible()) return false;
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
@@ -8335,7 +8366,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
ElementsKind kind = receiver_map->elements_kind();
if (!IsFastElementsKind(kind)) return false;
if (receiver_map->is_observed()) return false;
- DCHECK(receiver_map->is_extensible());
+ if (!receiver_map->is_extensible()) return false;
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
@@ -8450,7 +8481,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (!IsFastElementsKind(kind)) return false;
if (receiver_map->is_observed()) return false;
if (argument_count != 2) return false;
- DCHECK(receiver_map->is_extensible());
+ if (!receiver_map->is_extensible()) return false;
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
@@ -8642,19 +8673,16 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
api_function_address
};
- CallInterfaceDescriptor* descriptor =
- isolate()->call_descriptor(Isolate::ApiFunctionCall);
-
+ ApiFunctionDescriptor descriptor(isolate());
CallApiFunctionStub stub(isolate(), is_store, call_data_is_undefined, argc);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
- DCHECK((sizeof(op_vals) / kPointerSize) ==
- descriptor->GetEnvironmentLength());
+ DCHECK((sizeof(op_vals) / kPointerSize) == descriptor.GetEnvironmentLength());
HInstruction* call = New<HCallWithDescriptor>(
code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, descriptor->GetEnvironmentLength()));
+ Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
if (drop_extra) Drop(1); // Drop function.
ast_context()->ReturnInstruction(call, ast_id);
@@ -8800,6 +8828,12 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
Push(graph()->GetConstantMinus1());
if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) {
+ // Make sure that we can actually compare numbers correctly below, see
+ // https://code.google.com/p/chromium/issues/detail?id=407946 for details.
+ search_element = AddUncasted<HForceRepresentation>(
+ search_element, IsFastSmiElementsKind(kind) ? Representation::Smi()
+ : Representation::Double());
+
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
@@ -8807,12 +8841,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
elements, index, static_cast<HValue*>(NULL),
kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
- if (IsFastDoubleElementsKind(kind)) {
- if_issame.If<HCompareNumericAndBranch>(
- element, search_element, Token::EQ_STRICT);
- } else {
- if_issame.If<HCompareObjectEqAndBranch>(element, search_element);
- }
+ if_issame.If<HCompareNumericAndBranch>(element, search_element,
+ Token::EQ_STRICT);
if_issame.Then();
{
Drop(1);
@@ -9049,12 +9079,13 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// If there is a global property cell for the name at compile time and
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
- LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
- if (type == kUseCell &&
- !current_info()->global_object()->IsAccessCheckNeeded()) {
+ Handle<GlobalObject> global(current_info()->global_object());
+ LookupIterator it(global, var->name(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &it, LOAD);
+ if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
- known_global_function = expr->ComputeGlobalTarget(global, &lookup);
+ known_global_function = expr->ComputeGlobalTarget(global, &it);
}
if (known_global_function) {
Add<HCheckValue>(function, expr->target());
@@ -9762,7 +9793,7 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
static_cast<int>(Runtime::kFirstInlineFunction);
DCHECK(lookup_index >= 0);
DCHECK(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
+ arraysize(kInlineFunctionGenerators));
InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
// Call the inline code generator using the pointer-to-member.
@@ -10244,7 +10275,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
right_type->Maybe(Type::String()) ||
right_type->Maybe(Type::Receiver()));
- if (left_type->Is(Type::None())) {
+ if (!left_type->IsInhabited()) {
Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous
@@ -10255,7 +10286,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
left_rep = Representation::FromType(left_type);
}
- if (right_type->Is(Type::None())) {
+ if (!right_type->IsInhabited()) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
right_type = Type::Any(zone());
@@ -10451,7 +10482,7 @@ static bool IsClassOfTest(CompareOperation* expr) {
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
if (!literal->value()->IsString()) return false;
- if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
+ if (!call->name()->IsOneByteEqualTo(STATIC_CHAR_VECTOR("_ClassOf"))) {
return false;
}
DCHECK(call->arguments()->length() == 1);
@@ -10688,15 +10719,13 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<JSFunction> target = Handle<JSFunction>::null();
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
- if (global_function &&
- current_info()->has_global_object() &&
- !current_info()->global_object()->IsAccessCheckNeeded()) {
+ if (global_function && current_info()->has_global_object()) {
Handle<String> name = proxy->name();
Handle<GlobalObject> global(current_info()->global_object());
- LookupResult lookup(isolate());
- global->Lookup(name, &lookup);
- if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
+ LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Object> value = JSObject::GetDataProperty(&it);
+ if (it.IsFound() && value->IsJSFunction()) {
+ Handle<JSFunction> candidate = Handle<JSFunction>::cast(value);
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!isolate()->heap()->InNewSpace(*candidate)) {
@@ -10754,7 +10783,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
- if (combined_type->Is(Type::None())) {
+ if (!combined_type->IsInhabited()) {
Add<HDeoptimize>("Insufficient type feedback for combined type "
"of binary operation",
Deoptimizer::SOFT);
@@ -10970,7 +10999,8 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
}
// Copy in-object properties.
- if (boilerplate_object->map()->NumberOfFields() != 0) {
+ if (boilerplate_object->map()->NumberOfFields() != 0 ||
+ boilerplate_object->map()->unused_property_fields() > 0) {
BuildEmitInObjectProperties(boilerplate_object, object, site_context,
pretenure_flag);
}
@@ -11183,6 +11213,14 @@ void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
+void HOptimizedGraphBuilder::VisitSuperReference(SuperReference* expr) {
+ DCHECK(!HasStackOverflow());
+ DCHECK(current_block() != NULL);
+ DCHECK(current_block()->HasPredecessor());
+ return Bailout(kSuperReference);
+}
+
+
void HOptimizedGraphBuilder::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
DCHECK(globals_.is_empty());
@@ -11527,10 +11565,9 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
DCHECK(call->arguments()->length() == 3);
- // We need to follow the evaluation order of full codegen.
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
@@ -11544,10 +11581,9 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
DCHECK(call->arguments()->length() == 3);
- // We need to follow the evaluation order of full codegen.
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
@@ -11830,8 +11866,8 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionFastAsciiArrayJoin);
+void HOptimizedGraphBuilder::GenerateFastOneByteArrayJoin(CallRuntime* call) {
+ return Bailout(kInlinedRuntimeFunctionFastOneByteArrayJoin);
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index bc91e19136..d507643e5f 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -10,6 +10,7 @@
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/ast.h"
+#include "src/bailout-reason.h"
#include "src/compiler.h"
#include "src/hydrogen-instructions.h"
#include "src/scopes.h"
@@ -31,7 +32,7 @@ class LChunk;
class LiveRange;
-class HBasicBlock V8_FINAL : public ZoneObject {
+class HBasicBlock FINAL : public ZoneObject {
public:
explicit HBasicBlock(HGraph* graph);
~HBasicBlock() { }
@@ -217,7 +218,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
OStream& operator<<(OStream& os, const HBasicBlock& b);
-class HPredecessorIterator V8_FINAL BASE_EMBEDDED {
+class HPredecessorIterator FINAL BASE_EMBEDDED {
public:
explicit HPredecessorIterator(HBasicBlock* block)
: predecessor_list_(block->predecessors()), current_(0) { }
@@ -232,7 +233,7 @@ class HPredecessorIterator V8_FINAL BASE_EMBEDDED {
};
-class HInstructionIterator V8_FINAL BASE_EMBEDDED {
+class HInstructionIterator FINAL BASE_EMBEDDED {
public:
explicit HInstructionIterator(HBasicBlock* block)
: instr_(block->first()) {
@@ -252,7 +253,7 @@ class HInstructionIterator V8_FINAL BASE_EMBEDDED {
};
-class HLoopInformation V8_FINAL : public ZoneObject {
+class HLoopInformation FINAL : public ZoneObject {
public:
HLoopInformation(HBasicBlock* loop_header, Zone* zone)
: back_edges_(4, zone),
@@ -300,7 +301,7 @@ class HLoopInformation V8_FINAL : public ZoneObject {
class BoundsCheckTable;
class InductionVariableBlocksTable;
-class HGraph V8_FINAL : public ZoneObject {
+class HGraph FINAL : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
@@ -547,7 +548,7 @@ enum FrameType {
};
-class HEnvironment V8_FINAL : public ZoneObject {
+class HEnvironment FINAL : public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
@@ -812,37 +813,37 @@ class AstContext {
};
-class EffectContext V8_FINAL : public AstContext {
+class EffectContext FINAL : public AstContext {
public:
explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
- virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnValue(HValue* value) OVERRIDE;
virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
};
-class ValueContext V8_FINAL : public AstContext {
+class ValueContext FINAL : public AstContext {
public:
ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
- virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnValue(HValue* value) OVERRIDE;
virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -851,7 +852,7 @@ class ValueContext V8_FINAL : public AstContext {
};
-class TestContext V8_FINAL : public AstContext {
+class TestContext FINAL : public AstContext {
public:
TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
@@ -863,13 +864,13 @@ class TestContext V8_FINAL : public AstContext {
if_false_(if_false) {
}
- virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnValue(HValue* value) OVERRIDE;
virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) V8_OVERRIDE;
+ BailoutId ast_id) OVERRIDE;
static TestContext* cast(AstContext* context) {
DCHECK(context->IsTest());
@@ -891,7 +892,7 @@ class TestContext V8_FINAL : public AstContext {
};
-class FunctionState V8_FINAL {
+class FunctionState FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
@@ -964,7 +965,7 @@ class FunctionState V8_FINAL {
};
-class HIfContinuation V8_FINAL {
+class HIfContinuation FINAL {
public:
HIfContinuation()
: continuation_captured_(false),
@@ -1008,7 +1009,7 @@ class HIfContinuation V8_FINAL {
};
-class HAllocationMode V8_FINAL BASE_EMBEDDED {
+class HAllocationMode FINAL BASE_EMBEDDED {
public:
explicit HAllocationMode(Handle<AllocationSite> feedback_site)
: current_site_(NULL), feedback_site_(feedback_site),
@@ -1023,11 +1024,11 @@ class HAllocationMode V8_FINAL BASE_EMBEDDED {
HValue* current_site() const { return current_site_; }
Handle<AllocationSite> feedback_site() const { return feedback_site_; }
- bool CreateAllocationMementos() const V8_WARN_UNUSED_RESULT {
+ bool CreateAllocationMementos() const WARN_UNUSED_RESULT {
return current_site() != NULL;
}
- PretenureFlag GetPretenureMode() const V8_WARN_UNUSED_RESULT {
+ PretenureFlag GetPretenureMode() const WARN_UNUSED_RESULT {
if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode();
return pretenure_flag_;
}
@@ -1473,7 +1474,7 @@ class HGraphBuilder {
void AddIncrementCounter(StatsCounter* counter);
- class IfBuilder V8_FINAL {
+ class IfBuilder FINAL {
public:
// If using this constructor, Initialize() must be called explicitly!
IfBuilder();
@@ -1676,7 +1677,7 @@ class HGraphBuilder {
int deopt_merge_at_join_block_count_;
};
- class LoopBuilder V8_FINAL {
+ class LoopBuilder FINAL {
public:
enum Direction {
kPreIncrement,
@@ -1728,30 +1729,9 @@ class HGraphBuilder {
bool finished_;
};
- template <class A, class P1>
- void DeoptimizeIf(P1 p1, char* const reason) {
- IfBuilder builder(this);
- builder.If<A>(p1);
- builder.ThenDeopt(reason);
- }
-
- template <class A, class P1, class P2>
- void DeoptimizeIf(P1 p1, P2 p2, const char* reason) {
- IfBuilder builder(this);
- builder.If<A>(p1, p2);
- builder.ThenDeopt(reason);
- }
-
- template <class A, class P1, class P2, class P3>
- void DeoptimizeIf(P1 p1, P2 p2, P3 p3, const char* reason) {
- IfBuilder builder(this);
- builder.If<A>(p1, p2, p3);
- builder.ThenDeopt(reason);
- }
-
HValue* BuildNewElementsCapacity(HValue* old_capacity);
- class JSArrayBuilder V8_FINAL {
+ class JSArrayBuilder FINAL {
public:
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
@@ -2067,7 +2047,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
- class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED {
+ class BreakAndContinueInfo FINAL BASE_EMBEDDED {
public:
explicit BreakAndContinueInfo(BreakableStatement* target,
Scope* scope,
@@ -2097,7 +2077,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// A helper class to maintain a stack of current BreakAndContinueInfo
// structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope V8_FINAL BASE_EMBEDDED {
+ class BreakAndContinueScope FINAL BASE_EMBEDDED {
public:
BreakAndContinueScope(BreakAndContinueInfo* info,
HOptimizedGraphBuilder* owner)
@@ -2124,14 +2104,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
explicit HOptimizedGraphBuilder(CompilationInfo* info);
- virtual bool BuildGraph() V8_OVERRIDE;
+ virtual bool BuildGraph() OVERRIDE;
// Simple accessors.
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- bool inline_bailout() { return inline_bailout_; }
-
HValue* context() { return environment()->context(); }
HOsrBuilder* osr() const { return osr_; }
@@ -2313,13 +2291,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void PushArgumentsFromEnvironment(int count);
void SetUpScope(Scope* scope);
- virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
+ virtual void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) V8_OVERRIDE;
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
+ Type* ToType(Handle<Map> map);
private:
// Helpers for flow graph construction.
@@ -2327,8 +2305,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
kUseCell,
kUseGeneric
};
- GlobalPropertyAccess LookupGlobalProperty(Variable* var,
- LookupResult* lookup,
+ GlobalPropertyAccess LookupGlobalProperty(Variable* var, LookupIterator* it,
PropertyAccessType access_type);
void EnsureArgumentsArePushedForAccess();
@@ -2496,7 +2473,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
bool has_holder() { return !holder_.is_null(); }
bool IsLoad() const { return access_type_ == LOAD; }
- LookupResult* lookup() { return &lookup_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSFunction> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
@@ -2505,10 +2481,37 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HType field_type() const { return field_type_; }
HObjectAccess access() { return access_; }
+ bool IsFound() const { return lookup_.IsFound(); }
+ bool IsProperty() const { return lookup_.IsProperty(); }
+ bool IsField() const { return lookup_.IsField(); }
+ bool IsConstant() const { return lookup_.IsConstant(); }
+ bool IsAccessor() const { return lookup_.IsPropertyCallbacks(); }
+ bool IsTransition() const { return lookup_.IsTransition(); }
+
+ bool IsConfigurable() const { return lookup_.IsConfigurable(); }
+ bool IsReadOnly() const { return lookup_.IsReadOnly(); }
+
private:
+ Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
+ return handle(lookup_.GetValueFromMap(*map), isolate());
+ }
+ Handle<Object> GetConstantFromMap(Handle<Map> map) const {
+ return handle(lookup_.GetConstantFromMap(*map), isolate());
+ }
+ Handle<HeapType> GetFieldTypeFromMap(Handle<Map> map) const {
+ return handle(lookup_.GetFieldTypeFromMap(*map), isolate());
+ }
+ Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
+ return handle(lookup_.GetFieldOwnerFromMap(*map));
+ }
+ int GetLocalFieldIndexFromMap(Handle<Map> map) const {
+ return lookup_.GetLocalFieldIndexFromMap(*map);
+ }
+ Representation representation() const { return lookup_.representation(); }
+
Type* ToType(Handle<Map> map) { return builder_->ToType(map); }
Zone* zone() { return builder_->zone(); }
- Isolate* isolate() { return lookup_.isolate(); }
+ Isolate* isolate() const { return lookup_.isolate(); }
CompilationInfo* top_info() { return builder_->top_info(); }
CompilationInfo* current_info() { return builder_->current_info(); }
@@ -2622,10 +2625,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
- HValue* HandleKeyedElementAccess(HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
+ HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
+ Expression* expr, BailoutId ast_id,
+ BailoutId return_id,
PropertyAccessType access_type,
bool* has_side_effects);
@@ -2743,7 +2745,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Zone* AstContext::zone() const { return owner_->zone(); }
-class HStatistics V8_FINAL: public Malloced {
+class HStatistics FINAL: public Malloced {
public:
HStatistics()
: times_(5),
@@ -2806,7 +2808,7 @@ class HPhase : public CompilationPhase {
};
-class HTracer V8_FINAL : public Malloced {
+class HTracer FINAL : public Malloced {
public:
explicit HTracer(int isolate_id)
: trace_(&string_allocator_), indent_(0) {
@@ -2827,7 +2829,7 @@ class HTracer V8_FINAL : public Malloced {
void TraceLiveRanges(const char* name, LAllocator* allocator);
private:
- class Tag V8_FINAL BASE_EMBEDDED {
+ class Tag FINAL BASE_EMBEDDED {
public:
Tag(HTracer* tracer, const char* name) {
name_ = name;
@@ -2892,7 +2894,7 @@ class HTracer V8_FINAL : public Malloced {
};
-class NoObservableSideEffectsScope V8_FINAL {
+class NoObservableSideEffectsScope FINAL {
public:
explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
builder_(builder) {
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 2d67cf13eb..cae3a327a1 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -131,22 +131,19 @@ void SetResolvedDateSettings(Isolate* isolate,
icu::UnicodeString pattern;
date_format->toPattern(pattern);
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("pattern"),
+ resolved, factory->NewStringFromStaticChars("pattern"),
factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length())).ToHandleChecked(),
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length())).ToHandleChecked(),
SLOPPY).Assert();
// Set time zone and calendar.
const icu::Calendar* calendar = date_format->getCalendar();
const char* calendar_name = calendar->getType();
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("calendar"),
- factory->NewStringFromAsciiChecked(calendar_name),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("calendar"),
+ factory->NewStringFromAsciiChecked(calendar_name),
+ SLOPPY).Assert();
const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
@@ -157,19 +154,16 @@ void SetResolvedDateSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("timeZone"),
- factory->NewStringFromStaticAscii("UTC"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("timeZone"),
+ factory->NewStringFromStaticChars("UTC"), SLOPPY).Assert();
} else {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("timeZone"),
+ resolved, factory->NewStringFromStaticChars("timeZone"),
factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(
- canonical_time_zone.getBuffer()),
- canonical_time_zone.length())).ToHandleChecked(),
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(
+ canonical_time_zone.getBuffer()),
+ canonical_time_zone.length())).ToHandleChecked(),
SLOPPY).Assert();
}
}
@@ -183,16 +177,12 @@ void SetResolvedDateSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns), SLOPPY).Assert();
} else {
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("numberingSystem"),
- factory->undefined_value(),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("numberingSystem"),
+ factory->undefined_value(), SLOPPY).Assert();
}
delete numbering_system;
@@ -202,18 +192,14 @@ void SetResolvedDateSettings(Isolate* isolate,
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result),
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromStaticAscii("und"),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"),
+ SLOPPY).Assert();
}
}
@@ -350,24 +336,22 @@ void SetResolvedNumberSettings(Isolate* isolate,
icu::UnicodeString pattern;
number_format->toPattern(pattern);
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("pattern"),
+ resolved, factory->NewStringFromStaticChars("pattern"),
factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length())).ToHandleChecked(),
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length())).ToHandleChecked(),
SLOPPY).Assert();
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
if (!currency.isEmpty()) {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("currency"),
- factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(currency.getBuffer()),
- currency.length())).ToHandleChecked(),
+ resolved, factory->NewStringFromStaticChars("currency"),
+ factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(
+ currency.getBuffer()),
+ currency.length())).ToHandleChecked(),
SLOPPY).Assert();
}
@@ -380,62 +364,51 @@ void SetResolvedNumberSettings(Isolate* isolate,
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns), SLOPPY).Assert();
} else {
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("numberingSystem"),
- factory->undefined_value(),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("numberingSystem"),
+ factory->undefined_value(), SLOPPY).Assert();
}
delete numbering_system;
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("useGrouping"),
- factory->ToBoolean(number_format->isGroupingUsed()),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("useGrouping"),
+ factory->ToBoolean(number_format->isGroupingUsed()), SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("minimumIntegerDigits"),
+ resolved, factory->NewStringFromStaticChars("minimumIntegerDigits"),
factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("minimumFractionDigits"),
+ resolved, factory->NewStringFromStaticChars("minimumFractionDigits"),
factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("maximumFractionDigits"),
+ resolved, factory->NewStringFromStaticChars("maximumFractionDigits"),
factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
SLOPPY).Assert();
Handle<String> key =
- factory->NewStringFromStaticAscii("minimumSignificantDigits");
+ factory->NewStringFromStaticChars("minimumSignificantDigits");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(resolved, key);
CHECK(maybe.has_value);
if (maybe.value) {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("minimumSignificantDigits"),
+ resolved, factory->NewStringFromStaticChars("minimumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
SLOPPY).Assert();
}
- key = factory->NewStringFromStaticAscii("maximumSignificantDigits");
+ key = factory->NewStringFromStaticChars("maximumSignificantDigits");
maybe = JSReceiver::HasOwnProperty(resolved, key);
CHECK(maybe.has_value);
if (maybe.value) {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("maximumSignificantDigits"),
+ resolved, factory->NewStringFromStaticChars("maximumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
SLOPPY).Assert();
}
@@ -446,18 +419,14 @@ void SetResolvedNumberSettings(Isolate* isolate,
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result),
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromStaticAscii("und"),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"),
+ SLOPPY).Assert();
}
}
@@ -534,8 +503,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
UErrorCode status = U_ZERO_ERROR;
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("numeric"),
+ resolved, factory->NewStringFromStaticChars("numeric"),
factory->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
SLOPPY).Assert();
@@ -543,106 +511,77 @@ void SetResolvedCollatorSettings(Isolate* isolate,
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("caseFirst"),
- factory->NewStringFromStaticAscii("lower"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("lower"), SLOPPY).Assert();
break;
case UCOL_UPPER_FIRST:
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("caseFirst"),
- factory->NewStringFromStaticAscii("upper"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("upper"), SLOPPY).Assert();
break;
default:
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("caseFirst"),
- factory->NewStringFromStaticAscii("false"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("false"), SLOPPY).Assert();
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
case UCOL_PRIMARY: {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("strength"),
- factory->NewStringFromStaticAscii("primary"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("strength"),
+ factory->NewStringFromStaticChars("primary"), SLOPPY).Assert();
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("sensitivity"),
- factory->NewStringFromStaticAscii("case"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("case"), SLOPPY).Assert();
} else {
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("sensitivity"),
- factory->NewStringFromStaticAscii("base"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("base"), SLOPPY).Assert();
}
break;
}
case UCOL_SECONDARY:
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("strength"),
- factory->NewStringFromStaticAscii("secondary"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("strength"),
+ factory->NewStringFromStaticChars("secondary"), SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("sensitivity"),
- factory->NewStringFromStaticAscii("accent"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("accent"), SLOPPY).Assert();
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("strength"),
- factory->NewStringFromStaticAscii("tertiary"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("strength"),
+ factory->NewStringFromStaticChars("tertiary"), SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("sensitivity"),
- factory->NewStringFromStaticAscii("variant"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
// put them into variant.
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("strength"),
- factory->NewStringFromStaticAscii("quaternary"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("strength"),
+ factory->NewStringFromStaticChars("quaternary"), SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("sensitivity"),
- factory->NewStringFromStaticAscii("variant"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
break;
default:
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("strength"),
- factory->NewStringFromStaticAscii("identical"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("strength"),
+ factory->NewStringFromStaticChars("identical"), SLOPPY).Assert();
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("sensitivity"),
- factory->NewStringFromStaticAscii("variant"),
- SLOPPY).Assert();
+ resolved, factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
}
JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("ignorePunctuation"),
- factory->ToBoolean(collator->getAttribute(
- UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
+ resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
+ factory->ToBoolean(collator->getAttribute(UCOL_ALTERNATE_HANDLING,
+ status) == UCOL_SHIFTED),
SLOPPY).Assert();
// Set the locale
@@ -651,18 +590,14 @@ void SetResolvedCollatorSettings(Isolate* isolate,
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result),
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromStaticAscii("und"),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"),
+ SLOPPY).Assert();
}
}
@@ -713,18 +648,14 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result),
+ SLOPPY).Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- resolved,
- factory->NewStringFromStaticAscii("locale"),
- factory->NewStringFromStaticAscii("und"),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"),
+ SLOPPY).Assert();
}
}
@@ -786,7 +717,7 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
- isolate->factory()->NewStringFromStaticAscii("dateFormat");
+ isolate->factory()->NewStringFromStaticChars("dateFormat");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
CHECK(maybe.has_value);
if (maybe.value) {
@@ -862,7 +793,7 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
- isolate->factory()->NewStringFromStaticAscii("numberFormat");
+ isolate->factory()->NewStringFromStaticChars("numberFormat");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
CHECK(maybe.has_value);
if (maybe.value) {
@@ -919,7 +850,7 @@ icu::Collator* Collator::InitializeCollator(
icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
- Handle<String> key = isolate->factory()->NewStringFromStaticAscii("collator");
+ Handle<String> key = isolate->factory()->NewStringFromStaticChars("collator");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
CHECK(maybe.has_value);
if (maybe.value) {
@@ -980,7 +911,7 @@ icu::BreakIterator* BreakIterator::InitializeBreakIterator(
icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
- isolate->factory()->NewStringFromStaticAscii("breakIterator");
+ isolate->factory()->NewStringFromStaticChars("breakIterator");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
CHECK(maybe.has_value);
if (maybe.value) {
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index d8cd59cf50..fd6a8d6f98 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -38,6 +38,7 @@
#if V8_TARGET_ARCH_IA32
+#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
@@ -271,7 +272,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(IsPowerOf2(m));
+ DCHECK(base::bits::IsPowerOfTwo32(m));
int mask = m - 1;
int addr = pc_offset();
Nop((m - (addr & mask)) & mask);
@@ -2013,6 +2014,15 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::subsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2464,11 +2474,6 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
}
-void Assembler::Print() {
- Disassembler::Decode(isolate(), stdout, buffer_, pc_);
-}
-
-
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 5febffd8c3..cb1765521f 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -148,6 +148,11 @@ struct XMMRegister {
return kMaxNumAllocatableRegisters;
}
+ // TODO(turbofan): Proper support for float32.
+ static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
static int ToAllocationIndex(XMMRegister reg) {
DCHECK(reg.code() != 0);
return reg.code() - 1;
@@ -956,6 +961,7 @@ class Assembler : public AssemblerBase {
void addsd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
+ void subsd(XMMRegister dst, const Operand& src);
void mulsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
@@ -1038,9 +1044,6 @@ class Assembler : public AssemblerBase {
void prefetch(const Operand& src, int level);
// TODO(lrn): Need SFENCE for movnt?
- // Debugging
- void Print();
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index cca65f4716..c24e77f317 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -6,10 +6,10 @@
#if V8_TARGET_ARCH_IA32
+#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -550,8 +550,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
@@ -995,8 +995,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Copy all arguments from the array to the stack.
Label entry, loop;
- Register receiver = LoadIC::ReceiverRegister();
- Register key = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
__ mov(key, Operand(ebp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
@@ -1004,9 +1004,10 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use inline caching to speed up access to arguments.
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(), Immediate(Smi::FromInt(0)));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Immediate(Smi::FromInt(0)));
}
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
__ call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 104576e64a..39bef3065b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -6,129 +6,23 @@
#if V8_TARGET_ARCH_IA32
+#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ebx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, edi };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = { esi, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax, ebx, ecx };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
-
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax, ebx, ecx, edx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ebx, edx };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = {esi, edi};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // eax : number of arguments
- // ebx : feedback vector
- // edx : (only if ebx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
- // edi : constructor function
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {esi, eax, edi, ebx};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ecx, ebx, eax };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax, ebx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return esi; }
-
-
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStub::Major major,
- CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
@@ -138,28 +32,17 @@ static void InitializeArrayConstructorDescriptor(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { esi, edi, ebx };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { esi, edi, ebx, eax };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
@@ -168,200 +51,70 @@ static void InitializeInternalArrayConstructorDescriptor(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { esi, edi };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { esi, edi, eax };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, edx, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ecx, edx, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, edx, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { esi, // context
- edi, // JSFunction
- eax, // actual number of arguments
- ebx, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { esi, // context
- ecx, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { esi, // context
- ecx, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { esi, // context
- edx, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { esi, // context
- eax, // callee
- ebx, // call_data
- ecx, // holder
- edx, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- eax.is(descriptor->GetEnvironmentParameterRegister(
- param_count - 1)));
+ eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetEnvironmentParameterRegister(i));
}
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -374,7 +127,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ pushad();
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
@@ -390,7 +143,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(reg, Operand(esp, i * kDoubleSize));
@@ -628,7 +381,8 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
Factory* factory = isolate()->factory();
- const Register exponent = eax;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(eax));
const Register base = edx;
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
@@ -642,7 +396,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(scratch, Immediate(1));
__ Cvtsi2sd(double_result, scratch);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
@@ -673,7 +427,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &call_runtime);
__ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
@@ -683,10 +437,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label fast_power, try_arithmetic_simplification;
__ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
+ &try_arithmetic_simplification,
+ &try_arithmetic_simplification);
__ jmp(&int_exponent);
__ bind(&try_arithmetic_simplification);
@@ -695,7 +451,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Immediate(0x1));
__ j(overflow, &call_runtime);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
@@ -857,7 +613,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Returning or bailing out.
Counters* counters = isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
@@ -895,7 +651,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
ebx, &miss);
@@ -905,8 +661,40 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register scratch = eax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+ // Check that the key is an array index, that is Uint32.
+ __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ pop(scratch);
+ __ push(receiver); // receiver
+ __ push(key); // key
+ __ push(scratch); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
+ DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
// The displacement is used for skipping the frame pointer on the
// stack. It is the offset of the last parameter (if any) relative
@@ -1474,7 +1262,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(ebx, &runtime);
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataOneByteCodeOffset));
__ Move(ecx, Immediate(1)); // Type is one byte.
// (E) Carry on. String handling is done.
@@ -1488,7 +1276,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// eax: subject string
// ebx: previous index (smi)
// edx: code
- // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // ecx: encoding of subject string (1 if one_byte, 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -1533,7 +1321,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// esi: original subject string
// eax: underlying subject string
// ebx: previous index
- // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
+ // ecx: encoding of subject string (1 if one_byte 0 if two_byte);
// edx: code
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -1770,14 +1558,12 @@ static int NegativeComparisonResult(Condition cc) {
}
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
+static void CheckInputType(MacroAssembler* masm, Register input,
+ CompareICState::State expected, Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->heap_number_map()));
@@ -1802,13 +1588,13 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
}
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects;
Condition cc = GetCondition();
Label miss;
- CheckInputType(masm, edx, left_, &miss);
- CheckInputType(masm, eax, right_, &miss);
+ CheckInputType(masm, edx, left(), &miss);
+ CheckInputType(masm, eax, right(), &miss);
// Compare two smis.
Label non_smi, smi_done;
@@ -1979,23 +1765,15 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
+ __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
- // Inline comparison of ASCII strings.
+ // Inline comparison of one-byte strings.
if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- edx,
- eax,
- ecx,
- ebx);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, edx, eax, ecx, ebx);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+ edi);
}
#ifdef DEBUG
__ Abort(kUnexpectedFallThroughFromStringComparison);
@@ -2084,7 +1862,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// function without changing the state.
__ cmp(ecx, edi);
__ j(equal, &done, Label::kFar);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &done, Label::kFar);
if (!FLAG_pretenuring_call_new) {
@@ -2107,14 +1885,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ mov(
+ FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
@@ -2282,7 +2060,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -2366,7 +2144,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
Label miss;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2390,7 +2168,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -2410,7 +2188,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2421,7 +2199,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &extra_checks_or_miss);
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Load the receiver from the stack.
@@ -2440,7 +2218,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(isolate, masm, argc, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -2450,9 +2228,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
__ j(equal, &miss);
if (!FLAG_trace_ic) {
@@ -2463,13 +2241,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ jmp(&slow_start);
}
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -2487,9 +2265,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+ __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2501,6 +2279,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ push(edx);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -2529,13 +2310,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
- // Stubs might already be in the snapshot, detect that and don't regenerate,
- // which would lead to code stub initialization state being messed up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
- save_doubles_code = *(save_doubles.GetCode());
- }
+ // Generate if not already in cache.
+ CEntryStub(isolate, 1, kSaveFPRegs).GetCode();
isolate->set_fp_stubs_generated(true);
}
@@ -2557,7 +2333,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
+ __ EnterExitFrame(save_doubles());
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2565,7 +2341,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// edi: number of arguments including receiver (C callee-saved)
// esi: pointer to the first argument (C callee-saved)
- // Result returned in eax, or eax+edx if result_size_ is 2.
+ // Result returned in eax, or eax+edx if result size is 2.
// Check stack alignment.
if (FLAG_debug_code) {
@@ -2613,7 +2389,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
+ __ LeaveExitFrame(save_doubles());
__ ret(0);
// Handling of exception.
@@ -2640,7 +2416,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
@@ -2651,7 +2427,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(ebp, esp);
// Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
__ push(Immediate(Smi::FromInt(marker))); // context slot
__ push(Immediate(Smi::FromInt(marker))); // function slot
// Save callee-saved registers (C calling conventions).
@@ -2702,7 +2478,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// pop the faked function when we return. Notice that we cannot store a
// reference to the trampoline code directly in this stub, because the
// builtin stubs may not have been generated yet.
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
__ mov(edx, Immediate(construct_entry));
@@ -2772,9 +2548,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int kDeltaToCmpImmediate = 2;
static const int kDeltaToMov = 8;
static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
- static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
- static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+ static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
+ static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
+ static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
DCHECK_EQ(object.code(), InstanceofStub::left().code());
DCHECK_EQ(function.code(), InstanceofStub::right().code());
@@ -2965,12 +2741,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -3070,7 +2840,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxOneByteCharCode) << kSmiTagSize)));
@@ -3081,7 +2851,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one byte char code.
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -3143,74 +2913,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- if (masm->serializer_enabled()) {
- __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
- __ SmiUntag(scratch);
- __ add(scratch, character);
- __ mov(hash, scratch);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- } else {
- int32_t seed = masm->isolate()->heap()->HashSeed();
- __ lea(scratch, Operand(character, seed));
- __ shl(scratch, 10);
- __ lea(hash, Operand(scratch, character, times_1, seed));
- }
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, character);
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, scratch);
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ shr(scratch, 11);
- __ xor_(hash, scratch);
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, scratch);
-
- __ and_(hash, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero, Label::kNear);
- __ mov(hash, Immediate(StringHasher::kZeroHash));
- __ bind(&hash_not_zero);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -3313,7 +3015,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+ __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
__ jmp(&set_slice_header, Label::kNear);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
@@ -3360,8 +3062,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ test_b(ebx, kStringEncodingMask);
__ j(zero, &two_byte_sequential);
- // Sequential ASCII string. Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+ // Sequential one byte string. Allocate the result.
+ __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -3432,11 +3134,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
Register length = scratch1;
// Compare lengths.
@@ -3459,8 +3161,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
// Characters are equal.
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3468,12 +3170,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->string_compare_native(), 1);
@@ -3499,8 +3198,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare characters.
Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
@@ -3534,13 +3233,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch, Label* chars_not_equal,
Label::Distance chars_not_equal_near) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
@@ -3586,15 +3281,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
- // Compare flat ASCII strings.
+ // Compare flat one-byte strings.
// Drop arguments from the stack.
__ pop(ecx);
__ add(esp, Immediate(2 * kPointerSize));
__ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+ edi);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -3626,13 +3322,13 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
@@ -3657,17 +3353,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(edx, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(eax, &miss);
}
@@ -3714,12 +3410,12 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
__ JumpIfSmi(edx, &unordered);
@@ -3729,7 +3425,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
__ j(equal, &unordered);
}
@@ -3739,8 +3435,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
DCHECK(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -3784,8 +3480,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
DCHECK(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -3808,8 +3504,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
- __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
@@ -3829,11 +3525,11 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = edx;
@@ -3889,17 +3585,17 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&do_compare);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+ __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+ tmp2);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3);
}
// Handle more complex cases in runtime.
@@ -3919,8 +3615,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -3940,7 +3636,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -3961,7 +3657,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
@@ -3971,7 +3667,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(eax);
__ push(edx); // And also use them as the arguments.
__ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(op())));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -4034,8 +3730,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Check if the entry name is not a unique name.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- miss);
+ __ JumpIfNotUniqueNameInstanceType(
+ FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
__ bind(&good);
}
@@ -4127,9 +3823,9 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- Register scratch = result_;
+ Register scratch = result();
- __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
__ dec(scratch);
__ SmiUntag(scratch);
__ push(scratch);
@@ -4149,13 +3845,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
DCHECK(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ lea(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
DCHECK_EQ(kSmiTagSize, 1);
- __ mov(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
+ __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(scratch, isolate()->factory()->undefined_value());
__ j(equal, &not_in_dictionary);
@@ -4164,15 +3858,16 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ cmp(scratch, Operand(esp, 3 * kPointerSize));
__ j(equal, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// If we hit a key that is not a unique name during negative
// lookup we have to bailout as this key might be equal to the
// key we are looking for.
// Check if the entry name is not a unique name.
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
- &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(
+ FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
@@ -4180,19 +3875,19 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ mov(result_, Immediate(0));
+ if (mode() == POSITIVE_LOOKUP) {
+ __ mov(result(), Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
__ bind(&in_dictionary);
- __ mov(result_, Immediate(1));
+ __ mov(result(), Immediate(1));
__ Drop(1);
__ ret(2 * kPointerSize);
__ bind(&not_in_dictionary);
- __ mov(result_, Immediate(0));
+ __ mov(result(), Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
@@ -4222,11 +3917,8 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ jmp(&skip_to_incremental_noncompacting, Label::kNear);
__ jmp(&skip_to_incremental_compacting, Label::kFar);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4248,7 +3940,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ mov(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -4270,10 +3962,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -4290,7 +3979,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -4303,7 +3992,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4334,10 +4023,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4382,10 +4068,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4493,14 +4176,27 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ mov(ebx, MemOperand(ebp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ pop(ecx);
- int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
- ? kPointerSize
- : 0;
+ int additional_offset =
+ function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
__ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4695,7 +4391,7 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ test(eax, eax);
__ j(not_zero, &not_zero_case);
@@ -4708,11 +4404,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -4722,7 +4418,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : argc (only if argument_count_ == ANY)
+ // -- eax : argc (only if argument_count() == ANY)
// -- ebx : AllocationSite or undefined
// -- edi : constructor
// -- esp[0] : return address
@@ -4870,9 +4566,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register return_address = edi;
Register context = esi;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -4976,6 +4672,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- ...
// -- edx : api_function_address
// -----------------------------------
+ DCHECK(edx.is(ApiGetterDescriptor::function_address()));
// array for v8::Arguments::values_, handler for name and pointer
// to the values (it considered as smi in GC).
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index b72b6dd089..eabb5a56a7 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -5,9 +5,6 @@
#ifndef V8_IA32_CODE_STUBS_IA32_H_
#define V8_IA32_CODE_STUBS_IA32_H_
-#include "src/ic-inl.h"
-#include "src/macro-assembler.h"
-
namespace v8 {
namespace internal {
@@ -17,24 +14,6 @@ void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using the rep movs instruction.
@@ -47,69 +26,26 @@ class StringHelper : public AllStatic {
Register scratch,
String::Encoding encoding);
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+ // Compares two flat one byte strings and returns result in eax.
+ static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
- // Compares two flat ASCII strings and returns result in eax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
+ // Compares two flat one byte strings for equality and returns result in eax.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat ASCII strings for equality and returns result
- // in eax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
+ Register scratch2);
private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch, Label* chars_not_equal,
Label::Distance chars_not_equal_near = Label::kFar);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -117,15 +53,13 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Isolate* isolate,
- Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : PlatformCodeStub(isolate),
- dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
+ Register result, Register index, LookupMode mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = DictionaryBits::encode(dictionary.code()) |
+ ResultBits::encode(result.code()) |
+ IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -156,24 +90,27 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
+ Register dictionary() const {
+ return Register::from_code(DictionaryBits::decode(minor_key_));
+ }
+
+ Register result() const {
+ return Register::from_code(ResultBits::decode(minor_key_));
+ }
- int MinorKey() const {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
+ Register index() const {
+ return Register::from_code(IndexBits::decode(minor_key_));
}
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
class DictionaryBits: public BitField<int, 0, 3> {};
class ResultBits: public BitField<int, 3, 3> {};
class IndexBits: public BitField<int, 6, 3> {};
class LookupModeBits: public BitField<LookupMode, 9, 1> {};
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
@@ -186,16 +123,19 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -251,6 +191,8 @@ class RecordWriteStub: public PlatformCodeStub {
CpuFeatures::FlushICache(stub->instruction_start(), 7);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always ecx (needed for shift operations). The input is two registers
@@ -395,9 +337,11 @@ class RecordWriteStub: public PlatformCodeStub {
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- }
-;
- void Generate(MacroAssembler* masm);
+ };
+
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -405,18 +349,28 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 3> {};
@@ -425,12 +379,9 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 444f98b16a..52cf72b7a6 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -862,7 +862,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ j(zero, &seq_string, Label::kNear);
// Handle external strings.
- Label ascii_external, done;
+ Label one_byte_external, done;
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
@@ -877,22 +877,22 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
STATIC_ASSERT(kTwoByteStringTag == 0);
__ test_b(result, kStringEncodingMask);
__ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
+ __ j(not_equal, &one_byte_external, Label::kNear);
// Two-byte string.
__ movzx_w(result, Operand(result, index, times_2, 0));
__ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // Ascii string.
+ __ bind(&one_byte_external);
+ // One-byte string.
__ movzx_b(result, Operand(result, index, times_1, 0));
__ jmp(&done, Label::kNear);
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
+ // Dispatch on the encoding: one-byte or two-byte.
+ Label one_byte;
__ bind(&seq_string);
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
+ __ j(not_zero, &one_byte, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
@@ -902,9 +902,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
SeqTwoByteString::kHeaderSize));
__ jmp(&done, Label::kNear);
- // Ascii string.
+ // One-byte string.
// Load the byte into the result register.
- __ bind(&ascii);
+ __ bind(&one_byte);
__ movzx_b(result, FieldOperand(string,
index,
times_1,
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 3f59c2cb2f..2382388bea 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -6,7 +6,7 @@
#define V8_IA32_CODEGEN_IA32_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index c7a10d47cc..4331b088fb 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -180,17 +180,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-ia32.cc).
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-ia32.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0, false);
}
@@ -204,9 +204,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC store call (from ic-ia32.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0, false);
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 5fac8859d5..f40e23c0f3 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -194,7 +194,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
int params = descriptor->GetHandlerParameterCount();
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index aacaeeb6a6..a382dad315 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_IA32
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -988,7 +989,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1116,7 +1118,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// No need for a write barrier, we are storing a Smi in the feedback vector.
__ LoadHeapObject(ebx, FeedbackVector());
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
@@ -1255,9 +1257,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1278,6 +1278,25 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ Label done;
+ __ j(not_equal, &done);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1328,10 +1347,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), proxy->var()->name());
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
@@ -1415,10 +1434,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), var->name());
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
@@ -1634,9 +1653,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreIC::ValueRegister().is(eax));
- __ mov(StoreIC::NameRegister(), Immediate(key->value()));
- __ mov(StoreIC::ReceiverRegister(), Operand(esp, 0));
+ DCHECK(StoreDescriptor::ValueRegister().is(eax));
+ __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1797,13 +1816,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1811,11 +1836,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
// Nothing to do here.
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ push(result_register());
+ if (expr->is_compound()) {
+ __ push(MemOperand(esp, kPointerSize));
+ __ push(result_register());
+ }
+ break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
} else {
VisitForStackValue(property->obj());
}
@@ -1824,8 +1858,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, kPointerSize));
- __ mov(LoadIC::NameRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
+ __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1844,6 +1878,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1893,6 +1931,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1907,12 +1948,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
@@ -1945,7 +1986,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ mov(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
@@ -1957,7 +1998,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -1966,8 +2007,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -2024,10 +2065,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ mov(load_receiver, Operand(esp, kPointerSize));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
@@ -2044,7 +2085,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(load_name,
isolate()->factory()->done_string()); // "done"
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->DoneFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
@@ -2058,7 +2099,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(load_name,
isolate()->factory()->value_string()); // "value"
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->ValueFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
@@ -2220,9 +2261,11 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
- __ mov(LoadIC::NameRegister(), Immediate(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -2231,11 +2274,23 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ push(Immediate(key->value()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallIC(ic);
} else {
@@ -2260,8 +2315,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- BinaryOpICStub stub(isolate(), op, mode);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2343,9 +2398,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -2375,9 +2430,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ push(eax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ Move(StoreIC::ReceiverRegister(), eax);
- __ pop(StoreIC::ValueRegister()); // Restore value.
- __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
+ __ Move(StoreDescriptor::ReceiverRegister(), eax);
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
+ prop->key()->AsLiteral()->value());
CallStoreIC();
break;
}
@@ -2385,12 +2441,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ push(eax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(KeyedStoreIC::NameRegister(), eax);
- __ pop(KeyedStoreIC::ReceiverRegister()); // Receiver.
- __ pop(KeyedStoreIC::ValueRegister()); // Restore value.
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Move(StoreDescriptor::NameRegister(), eax);
+ __ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2414,8 +2469,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreIC::NameRegister(), var->name());
- __ mov(StoreIC::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(StoreDescriptor::NameRegister(), var->name());
+ __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2488,28 +2543,44 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // eax : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ push(eax);
+ __ push(Immediate(key->value()));
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// eax : value
// esp[0] : key
// esp[kPointerSize] : receiver
- __ pop(KeyedStoreIC::NameRegister()); // Key.
- __ pop(KeyedStoreIC::ReceiverRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(eax));
+ __ pop(StoreDescriptor::NameRegister()); // Key.
+ __ pop(StoreDescriptor::ReceiverRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(eax));
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2522,16 +2593,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), result_register());
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(LoadIC::ReceiverRegister()); // Object.
- __ Move(LoadIC::NameRegister(), result_register()); // Key.
+ __ pop(LoadDescriptor::ReceiverRegister()); // Object.
+ __ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -2549,11 +2627,10 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2564,7 +2641,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2576,6 +2654,42 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ push(eax);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ push(eax);
+ __ push(eax);
+ __ push(Operand(esp, kPointerSize * 2));
+ __ push(Immediate(key->value()));
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2586,8 +2700,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadIC::NameRegister(), eax);
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::NameRegister(), eax);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2595,11 +2709,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2736,15 +2850,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitSuperCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
-
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3247,7 +3367,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_string());
+ __ mov(eax, isolate()->factory()->Function_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3365,9 +3485,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = ebx;
Register value = ecx;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ pop(value);
__ pop(index);
@@ -3401,9 +3521,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = ebx;
Register value = ecx;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ pop(value);
__ pop(index);
@@ -3754,7 +3874,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
@@ -3812,7 +3932,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array = no_reg;
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ Move(index, Immediate(0));
__ Move(string_length, Immediate(0));
@@ -3821,7 +3941,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length, elements.
if (generate_debug_code_) {
__ cmp(index, array_length);
- __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ bind(&loop);
__ mov(string, FieldOperand(elements,
@@ -3859,7 +3979,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths, as a smi.
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ mov(string, separator_operand);
__ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
@@ -3883,8 +4003,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live registers and stack values:
// string_length
// elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
+ __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
+ &bailout);
__ mov(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
@@ -3927,7 +4047,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
@@ -4045,10 +4165,10 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
// Load the function from the receiver.
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadIC::NameRegister(), Immediate(expr->name()));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -4220,6 +4340,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -4235,14 +4360,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ mov(LoadIC::ReceiverRegister(),
- Operand(esp, kPointerSize)); // Object.
- __ mov(LoadIC::NameRegister(), Operand(esp, 0)); // Key.
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ Operand(esp, kPointerSize)); // Object.
+ __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0)); // Key.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4327,8 +4452,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+ NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4357,8 +4483,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::NameRegister(),
+ prop->key()->AsLiteral()->value());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4371,11 +4498,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(KeyedStoreIC::NameRegister());
- __ pop(KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ pop(StoreDescriptor::NameRegister());
+ __ pop(StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4399,10 +4525,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), Immediate(proxy->name()));
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4564,7 +4690,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
new file mode 100644
index 0000000000..3a0d5268e0
--- /dev/null
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -0,0 +1,304 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return edx; }
+const Register LoadDescriptor::NameRegister() { return ecx; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return edx; }
+const Register StoreDescriptor::NameRegister() { return ecx; }
+const Register StoreDescriptor::ValueRegister() { return eax; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() {
+ return ebx;
+}
+
+
+const Register InstanceofDescriptor::left() { return eax; }
+const Register InstanceofDescriptor::right() { return edx; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return edx; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
+
+
+const Register ApiGetterDescriptor::function_address() { return edx; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return eax; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // ToNumberStub invokes a function, and therefore needs a context.
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax, ebx, ecx};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax, ebx, ecx, edx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx, edx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ecx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi, edx};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // edi : constructor function
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {esi, eax, edi, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ecx, ebx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ Register registers[] = {esi, edi, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {esi, edi, ebx, eax};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ Register registers[] = {esi, edi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {esi, edi, eax};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ecx, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ ecx, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ ecx, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+      edx,  // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ edi, // JSFunction
+ eax, // actual number of arguments
+ ebx, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ eax, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 245dcdc482..1d7c8c1b0c 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -6,20 +6,22 @@
#if V8_TARGET_ARCH_IA32
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ic.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -29,9 +31,9 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) {}
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -381,16 +383,11 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; -------------------- Jump table --------------------");
}
for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
@@ -823,9 +820,10 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
@@ -865,19 +863,19 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&done);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
+ DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().bailout_type != bailout_type) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
@@ -889,12 +887,12 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, detail, bailout_type);
}
@@ -902,7 +900,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -1123,7 +1121,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
@@ -1140,7 +1138,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1155,7 +1153,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -1177,7 +1175,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1188,7 +1186,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@@ -1207,7 +1205,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1220,26 +1218,26 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1260,7 +1258,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1268,7 +1266,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1278,7 +1276,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
@@ -1298,7 +1296,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1307,7 +1305,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1317,7 +1315,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@@ -1328,7 +1326,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
@@ -1350,13 +1348,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
@@ -1383,7 +1381,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1391,7 +1389,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1438,7 +1436,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1447,7 +1445,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1457,7 +1455,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@@ -1535,7 +1533,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1545,15 +1543,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
@@ -1618,10 +1616,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
switch (instr->op()) {
case Token::ROR:
__ ror_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
- }
break;
case Token::SAR:
__ sar_cl(ToRegister(left));
@@ -1630,7 +1624,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
@@ -1647,7 +1641,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "negative value");
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1662,7 +1656,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
@@ -1673,7 +1667,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shl(ToRegister(left), shift_count);
}
@@ -1699,7 +1693,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
@@ -1716,7 +1710,7 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
DCHECK(instr->result()->IsDoubleRegister());
@@ -1783,9 +1777,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1915,7 +1909,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
@@ -2033,8 +2027,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2138,7 +2133,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
}
Register map = no_reg; // Keep the compiler happy.
@@ -2195,7 +2190,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
@@ -2459,7 +2454,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2536,7 +2531,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2566,7 +2561,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2627,15 +2622,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2730,7 +2725,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2825,28 +2820,36 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(LoadIC::NameRegister(), instr->name());
+ __ mov(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(eax));
- __ mov(LoadIC::SlotRegister(),
- Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2861,7 +2864,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
// Store the value.
@@ -2878,7 +2881,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2899,7 +2902,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@@ -2975,20 +2978,14 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(LoadIC::NameRegister(), instr->name());
+ __ mov(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(eax));
- __ mov(LoadIC::SlotRegister(),
- Immediate(Smi::FromInt(instr->hydrogen()->slot())));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3004,7 +3001,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3097,7 +3094,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3127,7 +3124,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -3146,20 +3143,18 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// Load the result.
__ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
+ BuildFastArrayOperand(instr->elements(), instr->key(),
instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- instr->base_offset()));
+ FAST_ELEMENTS, instr->base_offset()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a Smi");
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
@@ -3209,20 +3204,14 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(eax));
- __ mov(LoadIC::SlotRegister(),
- Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3312,9 +3301,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3340,7 +3329,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "too many arguments");
__ push(receiver);
__ mov(receiver, length);
@@ -3452,6 +3441,32 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+ Register scratch = ebx;
+ Register extra = eax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(name));
+ DCHECK(!extra.is(receiver) && !extra.is(name));
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ leave();
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
@@ -3507,7 +3522,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3554,22 +3569,22 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3612,20 +3627,20 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr, "NaN");
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3634,7 +3649,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
@@ -3644,7 +3659,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3655,7 +3670,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ bind(&done);
}
@@ -3683,8 +3698,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3699,8 +3713,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x1);
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3716,8 +3729,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- __ RecordComment("Minus zero");
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ Move(output_reg, Immediate(0));
__ bind(&done);
@@ -3777,10 +3789,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(xmm1));
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(eax));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
@@ -3789,9 +3802,10 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(eax, &no_deopt);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr->environment());
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!ecx.is(tagged_exponent));
+ __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -4074,10 +4088,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ mov(StoreIC::NameRegister(), instr->name());
+ __ mov(StoreDescriptor::NameRegister(), instr->name());
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4103,7 +4117,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
@@ -4256,13 +4270,12 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4272,7 +4285,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
@@ -4317,15 +4330,15 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen,
LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4375,15 +4388,15 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen,
LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4453,16 +4466,16 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen,
LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(
instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4480,15 +4493,15 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(
instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4562,14 +4575,14 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4617,12 +4630,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "overflow");
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
@@ -4633,7 +4646,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "not a Smi");
} else {
__ AssertSmi(result);
}
@@ -4641,13 +4654,13 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- Register temp_reg,
- XMMRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ Register temp_reg, XMMRegister result_reg,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4660,7 +4673,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
}
// Heap number to XMM conversion.
@@ -4673,7 +4686,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
@@ -4682,7 +4695,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
@@ -4736,32 +4749,40 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Move(input_reg, Immediate(0));
} else {
- Label bailout;
- XMMRegister scratch = (instr->temp() != NULL)
- ? ToDoubleRegister(instr->temp())
- : no_xmm_reg;
- __ TaggedToI(input_reg, input_reg, scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout);
- __ jmp(done);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ XMMRegister scratch = ToDoubleRegister(instr->temp());
+ DCHECK(!scratch.is(xmm0));
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ DeoptimizeIf(not_equal, instr, "not a heap number");
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2si(input_reg, Operand(xmm0));
+ __ Cvtsi2sd(scratch, Operand(input_reg));
+ __ ucomisd(xmm0, scratch);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
+ if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+ __ test(input_reg, Operand(input_reg));
+ __ j(not_zero, done);
+ __ movmskpd(input_reg, xmm0);
+ __ and_(input_reg, 1);
+ DeoptimizeIf(not_zero, instr, "minus zero");
+ }
}
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_, done());
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4797,8 +4818,6 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = ToRegister(temp);
HValue* value = instr->hydrogen()->value();
@@ -4806,13 +4825,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
}
@@ -4827,14 +4840,20 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
XMMRegister input_reg = ToDoubleRegister(input);
__ TruncateDoubleToI(result_reg, input_reg);
} else {
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
XMMRegister input_reg = ToDoubleRegister(input);
XMMRegister xmm_scratch = double_scratch0();
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+ &is_nan, &minus_zero, dist);
+ __ jmp(&done, dist);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
@@ -4847,25 +4866,30 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
DCHECK(result->IsRegister());
Register result_reg = ToRegister(result);
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
XMMRegister input_reg = ToDoubleRegister(input);
XMMRegister xmm_scratch = double_scratch0();
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+ &minus_zero, dist);
+ __ jmp(&done, dist);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
-
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "not a Smi");
}
@@ -4873,7 +4897,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
}
}
@@ -4894,14 +4918,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
@@ -4909,15 +4933,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
@@ -4933,7 +4957,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
@@ -4948,22 +4972,22 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5001,7 +5025,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
@@ -5040,7 +5064,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@@ -5096,14 +5120,14 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5270,9 +5294,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -5436,8 +5459,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}
@@ -5464,14 +5486,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5532,17 +5554,17 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "undefined");
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "null");
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -5557,7 +5579,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
@@ -5580,7 +5602,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "no cache");
}
@@ -5588,7 +5610,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
@@ -5607,7 +5629,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5618,10 +5640,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register object_;
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index d2f85f1279..0918252327 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -148,8 +148,8 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
- void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@@ -209,10 +209,9 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -259,7 +258,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -269,14 +268,8 @@ class LCodeGen: public LCodeGenBase {
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagD(
- Register input,
- Register temp,
- XMMRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
+ XMMRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -311,7 +304,7 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -319,6 +312,9 @@ class LCodeGen: public LCodeGenBase {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
// Emits code for pushing either a tagged constant, a (non-double)
@@ -356,7 +352,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 1e590fd4ff..682503b1e4 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -292,7 +292,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
index 87549d00bb..43df245835 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index e02b65e30f..3ed6623e9d 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -461,12 +461,6 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -627,9 +621,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
+ instr->set_environment(CreateEnvironment(
+ hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
return instr;
}
@@ -1124,13 +1117,13 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1140,6 +1133,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
@@ -1640,9 +1646,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
- UseFixed(instr->right(), eax);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), xmm1)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
@@ -2083,7 +2090,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
- bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+ bool value_is_zero = bit_cast<uint64_t, double>(value) == 0;
LOperand* temp = value_is_zero ? NULL : TempRegister();
return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsExternal()) {
@@ -2107,11 +2114,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2168,10 +2175,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
@@ -2231,11 +2239,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
@@ -2316,10 +2325,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(),
- KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2399,8 +2408,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (instr->field_representation().IsSmi()) {
- val = UseTempRegister(instr->value());
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
@@ -2421,8 +2428,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LStoreNamedGeneric* result =
new(zone()) LStoreNamedGeneric(context, object, value);
@@ -2499,10 +2507,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2518,7 +2526,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kNotEnoughSpillSlotsForOsr);
+ Retry(kNotEnoughSpillSlotsForOsr);
spill_index = 0;
}
if (spill_index == 0) {
@@ -2630,6 +2638,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 4206482de7..75fed82538 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -155,6 +155,7 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -167,11 +168,11 @@ class LCodeGen;
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -291,7 +292,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -313,11 +314,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
@@ -331,8 +332,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const FINAL OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -368,11 +369,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -380,17 +381,17 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return false;
}
@@ -401,20 +402,20 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -423,25 +424,25 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -457,16 +458,16 @@ class LLabel V8_FINAL : public LGap {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -479,9 +480,30 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -493,7 +515,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -532,7 +554,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LWrapReceiver(LOperand* receiver,
LOperand* function,
@@ -551,7 +573,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -572,7 +594,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -586,11 +608,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -602,20 +624,20 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -633,7 +655,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LModByConstI(LOperand* dividend,
int32_t divisor,
@@ -658,7 +680,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -675,7 +697,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -693,7 +715,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LDivByConstI(LOperand* dividend,
int32_t divisor,
@@ -718,7 +740,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -735,7 +757,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -754,7 +776,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LFlooringDivByConstI(LOperand* dividend,
int32_t divisor,
@@ -782,7 +804,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -799,7 +821,7 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -816,7 +838,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -839,7 +861,7 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -852,7 +874,7 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -867,7 +889,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
@@ -877,7 +899,7 @@ class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -892,7 +914,7 @@ class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -904,7 +926,7 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
@@ -916,7 +938,7 @@ class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
LOperand* temp1,
@@ -935,7 +957,7 @@ class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -947,7 +969,7 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -961,7 +983,7 @@ class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -975,7 +997,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -988,7 +1010,7 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1004,7 +1026,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1016,11 +1038,11 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1033,11 +1055,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1048,11 +1070,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1066,11 +1088,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1086,13 +1108,13 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1106,11 +1128,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1123,7 +1145,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1135,11 +1157,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -1152,7 +1174,7 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1168,11 +1190,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1188,7 +1210,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1202,7 +1224,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1223,7 +1245,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1232,7 +1254,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1247,7 +1269,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1264,7 +1286,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1286,7 +1308,7 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1301,7 +1323,7 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1310,7 +1332,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1319,7 +1341,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1334,7 +1356,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1345,7 +1367,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1356,7 +1378,7 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LBranch FINAL : public LControlInstruction<1, 1> {
public:
LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1369,11 +1391,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1388,7 +1410,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1400,7 +1422,7 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
: index_(index) {
@@ -1421,7 +1443,7 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -1436,7 +1458,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -1457,7 +1479,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1477,7 +1499,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1492,7 +1514,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1507,7 +1529,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1520,18 +1542,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1547,11 +1569,11 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
Token::Value op() const { return op_; }
@@ -1560,7 +1582,7 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value,
LOperand* context,
@@ -1584,7 +1606,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1597,7 +1619,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1616,7 +1638,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1631,7 +1653,7 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1640,7 +1662,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1664,7 +1686,7 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
@@ -1688,7 +1710,7 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
LOperand* vector) {
@@ -1708,14 +1730,14 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1736,7 +1758,7 @@ class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
@@ -1749,7 +1771,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1762,11 +1784,11 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1783,11 +1805,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1799,7 +1821,7 @@ class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1812,7 +1834,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
@@ -1829,7 +1851,7 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1845,21 +1867,21 @@ class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1872,7 +1894,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -1883,44 +1905,44 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1933,13 +1955,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1956,7 +1978,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1969,13 +1991,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1988,13 +2010,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -2005,7 +2027,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2015,7 +2037,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2027,7 +2049,7 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2039,7 +2061,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2053,7 +2075,7 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2067,7 +2089,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2083,7 +2105,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDoubleToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2100,7 +2122,7 @@ class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2114,7 +2136,7 @@ class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2131,7 +2153,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2144,7 +2166,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2159,7 +2181,7 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2177,7 +2199,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* obj,
LOperand* val,
@@ -2197,11 +2219,11 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2216,13 +2238,13 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
@@ -2247,13 +2269,13 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
@@ -2273,13 +2295,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2300,7 +2322,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2311,7 +2333,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2327,7 +2349,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2344,7 +2366,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2361,7 +2383,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2376,7 +2398,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -2389,7 +2411,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2404,7 +2426,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
@@ -2417,7 +2439,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2429,7 +2451,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2441,7 +2463,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2453,7 +2475,7 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
inputs_[0] = value;
@@ -2467,7 +2489,7 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2480,7 +2502,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -2493,7 +2515,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -2507,7 +2529,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
@@ -2524,7 +2546,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2537,7 +2559,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2550,7 +2572,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2563,7 +2585,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2577,7 +2599,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2590,20 +2612,20 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2621,7 +2643,7 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2635,7 +2657,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2651,7 +2673,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2665,7 +2687,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2709,7 +2731,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph),
@@ -2725,20 +2747,14 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator) { }
-
- Isolate* isolate() const { return graph_->isolate(); }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2768,24 +2784,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
@@ -2831,7 +2829,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2881,10 +2879,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 7e05e674dd..e95ee39cea 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -6,12 +6,14 @@
#if V8_TARGET_ARCH_IA32
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/serialize.h"
namespace v8 {
@@ -179,8 +181,7 @@ void MacroAssembler::RememberedSetHelper(
DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate(), save_fp);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
@@ -249,18 +250,17 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
}
-void MacroAssembler::DoubleToI(Register result_reg,
- XMMRegister input_reg,
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
XMMRegister scratch,
MinusZeroMode minus_zero_mode,
- Label* conversion_failed,
- Label::Distance dst) {
+ Label* lost_precision, Label* is_nan,
+ Label* minus_zero, Label::Distance dst) {
DCHECK(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
- j(not_equal, conversion_failed, dst);
- j(parity_even, conversion_failed, dst); // NaN.
+ j(not_equal, lost_precision, dst);
+ j(parity_even, is_nan, dst);
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
Label done;
// The integer converted back is equal to the original. We
@@ -270,9 +270,9 @@ void MacroAssembler::DoubleToI(Register result_reg,
movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
- // jump to conversion_failed.
+ // jump to minus_zero.
and_(result_reg, 1);
- j(not_zero, conversion_failed, dst);
+ j(not_zero, minus_zero, dst);
bind(&done);
}
}
@@ -345,40 +345,6 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
-void MacroAssembler::TaggedToI(Register result_reg,
- Register input_reg,
- XMMRegister temp,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision) {
- Label done;
- DCHECK(!temp.is(xmm0));
-
- cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- j(not_equal, lost_precision, Label::kNear);
-
- DCHECK(!temp.is(no_xmm_reg));
-
- movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- cvttsd2si(result_reg, Operand(xmm0));
- Cvtsi2sd(temp, Operand(result_reg));
- ucomisd(xmm0, temp);
- RecordComment("Deferred TaggedToI: lost precision");
- j(not_equal, lost_precision, Label::kNear);
- RecordComment("Deferred TaggedToI: NaN");
- j(parity_even, lost_precision, Label::kNear);
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- test(result_reg, Operand(result_reg));
- j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, xmm0);
- and_(result_reg, 1);
- RecordComment("Deferred TaggedToI: minus zero");
- j(not_zero, lost_precision, Label::kNear);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src) {
Label done;
@@ -426,8 +392,8 @@ void MacroAssembler::RecordWriteArray(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -471,8 +437,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -532,9 +498,9 @@ void MacroAssembler::RecordWriteForMap(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -602,8 +568,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(address, Immediate(BitCast<int32_t>(kZapValue)));
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -994,7 +960,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- DCHECK(IsPowerOf2(kFrameAlignment));
+ DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
and_(esp, -kFrameAlignment);
}
@@ -1734,12 +1700,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -1748,7 +1712,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
@@ -1761,7 +1725,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
+ Immediate(isolate()->factory()->one_byte_string_map()));
mov(scratch1, length);
SmiTag(scratch1);
mov(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -1770,20 +1734,18 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, int length,
+ Register scratch1, Register scratch2,
+ Label* gc_required) {
DCHECK(length > 0);
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
gc_required, TAG_OBJECT);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
+ Immediate(isolate()->factory()->one_byte_string_map()));
mov(FieldOperand(result, String::kLengthOffset),
Immediate(Smi::FromInt(length)));
mov(FieldOperand(result, String::kHashFieldOffset),
@@ -1805,10 +1767,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -1818,7 +1780,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_ascii_string_map()));
+ Immediate(isolate()->factory()->cons_one_byte_string_map()));
}
@@ -1836,17 +1798,17 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_ascii_string_map()));
+ Immediate(isolate()->factory()->sliced_one_byte_string_map()));
}
@@ -1935,7 +1897,7 @@ void MacroAssembler::BooleanBitTest(Register object,
int field_offset,
int bit_index) {
bit_index += kSmiTagSize + kSmiShiftSize;
- DCHECK(IsPowerOf2(kBitsPerByte));
+ DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
test_b(FieldOperand(object, field_offset + byte_index),
@@ -2123,7 +2085,7 @@ Operand ApiParameterOperand(int index) {
void MacroAssembler::PrepareCallApiFunction(int argc) {
EnterApiExitFrame(argc);
if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -2658,7 +2620,7 @@ void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
void MacroAssembler::Move(XMMRegister dst, double val) {
// TODO(titzer): recognize double constants with ExternalReferences.
- uint64_t int_val = BitCast<uint64_t, double>(val);
+ uint64_t int_val = bit_cast<uint64_t, double>(val);
if (int_val == 0) {
xorps(dst, dst);
} else {
@@ -2772,7 +2734,7 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
@@ -2910,10 +2872,8 @@ void MacroAssembler::LookupNumberStringCache(Register object,
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure) {
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch, Label* failure) {
if (!scratch.is(instance_type)) {
mov(scratch, instance_type);
}
@@ -2924,11 +2884,11 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that both objects are not smis.
STATIC_ASSERT(kSmiTag == 0);
mov(scratch1, object1);
@@ -2941,24 +2901,24 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ASCII strings.
- const int kFlatAsciiStringMask =
+ // Check that both are flat one-byte strings.
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
- DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- and_(scratch1, kFlatAsciiStringMask);
- and_(scratch2, kFlatAsciiStringMask);
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+ and_(scratch1, kFlatOneByteStringMask);
+ and_(scratch2, kFlatOneByteStringMask);
lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
j(not_equal, failure);
}
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
- Label* not_unique_name,
- Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -3012,7 +2972,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// and the original value of esp.
mov(scratch, esp);
sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
@@ -3290,12 +3250,12 @@ void MacroAssembler::EnsureNotWhite(
jmp(&is_data_object, Label::kNear);
bind(&not_external);
- // Sequential string, either ASCII or UC16.
+ // Sequential string, either Latin1 or UC16.
DCHECK(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
add(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+ // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
@@ -3423,12 +3383,14 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(eax));
DCHECK(!dividend.is(edx));
- MultiplierAndShift ms(divisor);
- mov(eax, Immediate(ms.multiplier()));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ mov(eax, Immediate(mag.multiplier));
imul(dividend);
- if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
- if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
- if (ms.shift() > 0) sar(edx, ms.shift());
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) add(edx, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
+ if (mag.shift > 0) sar(edx, mag.shift);
mov(eax, dividend);
shr(eax, 31);
add(edx, eax);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 3b2051f231..81347e58f2 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -6,6 +6,7 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "src/assembler.h"
+#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
@@ -463,11 +464,9 @@ class MacroAssembler: public Assembler {
void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, MinusZeroMode minus_zero_mode,
- Label* conversion_failed, Label::Distance dst = Label::kFar);
-
- void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
- MinusZeroMode minus_zero_mode, Label* lost_precision);
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* lost_precision, Label* is_nan, Label* minus_zero,
+ Label::Distance dst = Label::kFar);
// Smi tagging support.
void SmiTag(Register reg) {
@@ -658,17 +657,11 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
+ void AllocateOneByteString(Register result, int length, Register scratch1,
+ Register scratch2, Label* gc_required);
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
@@ -676,10 +669,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
@@ -687,10 +678,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
@@ -914,29 +903,27 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* not_found);
- // Check whether the instance type represents a flat ASCII string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label* on_not_flat_ascii_string);
+ // Check whether the instance type represents a flat one-byte string. Jump to
+ // the label if not. If the instance type can be scratched specify same
+ // register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch,
+ Label* on_not_flat_one_byte_string);
- // Checks if both objects are sequential ASCII strings, and jumps to label
+ // Checks if both objects are sequential one-byte strings, and jumps to label
// if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* on_not_flat_ascii_strings);
+ void JumpIfNotBothSequentialOneByteStrings(
+ Register object1, Register object2, Register scratch1, Register scratch2,
+ Label* on_not_flat_one_byte_strings);
// Checks if the given register or operand is a unique name
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
- Label::Distance distance = Label::kFar) {
- JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar) {
+ JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
}
- void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
+ void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
void EmitSeqStringSetCharCheck(Register string,
Register index,
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 5f31298c9a..4118db8819 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -220,7 +220,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(eax, ebx);
BranchOrBacktrack(greater, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_increment;
@@ -366,7 +366,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ movzx_b(eax, Operand(edx, 0));
__ cmpb_al(Operand(ebx, 0));
} else {
@@ -476,7 +476,7 @@ void RegExpMacroAssemblerIA32::CheckBitInTable(
Label* on_bit_set) {
__ mov(eax, Immediate(table));
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ mov(ebx, kTableSize - 1);
__ and_(ebx, current_character());
index = ebx;
@@ -493,7 +493,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), ' ');
@@ -543,8 +543,8 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
@@ -557,8 +557,8 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Immediate('z'));
__ j(above, &done);
}
@@ -567,7 +567,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
__ test_b(current_character(),
Operand::StaticArray(current_character(), times_1, word_map));
BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -584,7 +584,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(eax, Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
Label done;
@@ -1099,7 +1099,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1130,8 +1130,8 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1278,7 +1278,7 @@ void RegExpMacroAssemblerIA32::CheckStackLimit() {
void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
} else if (characters == 2) {
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index e04a8ef4b6..8f6499cba9 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -174,7 +174,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
new file mode 100644
index 0000000000..c3bf11c439
--- /dev/null
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/access-compiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
+ const char* name) {
+ // Create code object in the heap.
+ CodeDesc desc;
+ masm()->GetCode(&desc);
+ Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
+ if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code_stubs) {
+ OFStream os(stdout);
+ code->Disassemble(name, os);
+ }
+#endif
+ return code;
+}
+
+
+Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
+ Handle<Name> name) {
+ return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
+ ? GetCodeWithFlags(flags,
+ Handle<String>::cast(name)->ToCString().get())
+ : GetCodeWithFlags(flags, NULL);
+}
+
+
+void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
+ Builtins::Name name) {
+ Handle<Code> code(masm->isolate()->builtins()->builtin(name));
+ GenerateTailCall(masm, code);
+}
+
+
+Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
+ if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
+ return load_calling_convention();
+ }
+ DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ return store_calling_convention();
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
new file mode 100644
index 0000000000..928b70b749
--- /dev/null
+++ b/deps/v8/src/ic/access-compiler.h
@@ -0,0 +1,83 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_ACCESS_COMPILER_H_
+#define V8_IC_ACCESS_COMPILER_H_
+
+#include "src/code-stubs.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class PropertyAccessCompiler BASE_EMBEDDED {
+ public:
+ static Builtins::Name MissBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::LOAD_IC:
+ return Builtins::kLoadIC_Miss;
+ case Code::STORE_IC:
+ return Builtins::kStoreIC_Miss;
+ case Code::KEYED_LOAD_IC:
+ return Builtins::kKeyedLoadIC_Miss;
+ case Code::KEYED_STORE_IC:
+ return Builtins::kKeyedStoreIC_Miss;
+ default:
+ UNREACHABLE();
+ }
+ return Builtins::kLoadIC_Miss;
+ }
+
+ static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
+
+ protected:
+ PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
+ CacheHolderFlag cache_holder)
+ : registers_(GetCallingConvention(kind)),
+ kind_(kind),
+ cache_holder_(cache_holder),
+ isolate_(isolate),
+ masm_(isolate, NULL, 256) {}
+
+ Code::Kind kind() const { return kind_; }
+ CacheHolderFlag cache_holder() const { return cache_holder_; }
+ MacroAssembler* masm() { return &masm_; }
+ Isolate* isolate() const { return isolate_; }
+ Heap* heap() const { return isolate()->heap(); }
+ Factory* factory() const { return isolate()->factory(); }
+
+ Register receiver() const { return registers_[0]; }
+ Register name() const { return registers_[1]; }
+ Register scratch1() const { return registers_[2]; }
+ Register scratch2() const { return registers_[3]; }
+ Register scratch3() const { return registers_[4]; }
+
+ // Calling convention between indexed store IC and handler.
+ Register transition_map() const { return scratch1(); }
+
+ static Register* GetCallingConvention(Code::Kind);
+ static Register* load_calling_convention();
+ static Register* store_calling_convention();
+ static Register* keyed_store_calling_convention();
+
+ Register* registers_;
+
+ static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
+
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
+
+ private:
+ Code::Kind kind_;
+ CacheHolderFlag cache_holder_;
+
+ Isolate* isolate_;
+ MacroAssembler masm_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_IC_ACCESS_COMPILER_H_
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
new file mode 100644
index 0000000000..4a4d688c05
--- /dev/null
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, r3, r0, r4, r5};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(r3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, r3, r4, r5};
+ return registers;
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 38f391a336..5314d48ff6 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,9 +6,9 @@
#if V8_TARGET_ARCH_ARM
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
@@ -16,85 +16,79 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ ldr(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Check the map matches.
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- // It's a nice optimization if this constant is encodable in the bic insn.
-
- uint32_t mask = Code::kFlagsNotUsedInLookup;
- DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
- __ bic(flags_reg, flags_reg, Operand(mask));
- __ cmp(flags_reg, Operand(flags));
- __ b(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
}
-#endif
- // Jump to the first instruction in the code stub.
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
- // Miss: fall through.
- __ bind(&miss);
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(r0);
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
}
@@ -138,112 +132,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
-
- // Check scratch, extra and extra2 registers are valid.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
- // Mask down the eor argument to the minimum to keep the immediate
- // ARM-encodable.
- __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ and_(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ and_(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
Isolate* isolate = masm->isolate();
@@ -392,9 +287,29 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -466,8 +381,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ jmp(&do_store);
__ bind(&heap_number);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+ DONT_DO_SMI_CHECK);
__ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -497,13 +412,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
@@ -521,8 +431,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
@@ -537,14 +447,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -563,14 +468,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -581,7 +481,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -614,8 +514,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
@@ -648,10 +548,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
+ NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -674,9 +574,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -765,8 +664,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(receiver());
if (heap()->InNewSpace(callback->data())) {
__ Move(scratch3(), callback);
- __ ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
+ __ ldr(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
} else {
__ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
}
@@ -774,14 +673,13 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ mov(scratch4(), scratch3());
__ Push(scratch3(), scratch4());
- __ mov(scratch4(),
- Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch4(), reg);
__ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
__ push(name());
// Abi for CallApiGetter
- Register getter_address_reg = r2;
+ Register getter_address_reg = ApiGetterDescriptor::function_address();
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@@ -794,91 +692,75 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
}
- }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1());
+ __ b(eq, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1());
- __ b(eq, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
+ __ bind(&interceptor_failed);
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
}
+ // Leave the internal frame.
+ }
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
- isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
- }
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -904,54 +786,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(r0);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ Push(receiver(), this->name(), value());
@@ -966,79 +800,18 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
}
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, r3, r0, r4, r5 };
- return registers;
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
}
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(r3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, r3, r4, r5 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- Register result = StoreIC::ValueRegister();
+ Register result = StoreDescriptor::ValueRegister();
__ mov(result, Operand(cell));
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
@@ -1060,128 +833,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ cmp(this->name(), Operand(name));
- __ b(ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
-
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- __ mov(ip, Operand(map));
- __ cmp(map_reg, ip);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(scratch1(), ip);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- __ mov(transition_map(), Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, miss;
-
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
-
- __ UntagAndJumpIfNotSmi(r6, key, &miss);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5);
- __ Ret();
-
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
-
-
-#undef __
-
-} } // namespace v8::internal
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index d1add6d2ff..ae13161501 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -6,13 +6,10 @@
#if V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -25,8 +22,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -52,12 +48,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
@@ -66,18 +59,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
@@ -101,12 +90,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
@@ -115,23 +101,20 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
__ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ tst(scratch1, Operand(kTypeAndReadOnlyMask));
__ b(ne, miss);
@@ -143,19 +126,17 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
+ Register receiver, Register map,
Register scratch,
- int interceptor_bit,
- Label* slow) {
+ int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
@@ -178,14 +159,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch1, Register scratch2,
+ Register result, Label* not_fast_array,
Label* out_of_range) {
// Register use:
//
@@ -237,12 +214,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
@@ -268,34 +242,17 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ ldr(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
+ __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), r0, r3, r4);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
@@ -314,12 +271,11 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
+ __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
@@ -327,21 +283,17 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
+ __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
+static MemOperand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
Heap* heap = masm->isolate()->heap();
// Check that the receiver is a JSObject. Because of the map check
@@ -412,44 +364,15 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
__ b(cs, slow_case);
__ mov(scratch, Operand(kPointerSize >> 1));
__ mul(scratch, key, scratch);
- __ add(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
return MemOperand(backing_store, scratch);
}
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
-
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, r0, r3, r4, &notin, &slow);
- __ ldr(r0, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r0.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
- __ ldr(r0, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, r3);
- __ b(eq, &slow);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(value.is(r0));
@@ -482,7 +405,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@@ -492,37 +415,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return r1; }
-const Register LoadIC::NameRegister() { return r2; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return r0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return r3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return r1; }
-const Register StoreIC::NameRegister() { return r2; }
-const Register StoreIC::ValueRegister() { return r0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return r3;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -533,8 +429,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(r2));
DCHECK(receiver.is(r1));
@@ -546,14 +442,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
+ Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r0, r3, &check_number_dictionary);
- GenerateFastArrayLoad(
- masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
__ Ret();
@@ -573,15 +468,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case, key and receiver still in r2 and r1.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1, r4, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
+ r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
+ Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -659,8 +554,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ add(r6, r6, r5); // Index from start of object.
__ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
__ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r4, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ r4, r3);
__ Ret();
// Load property array property.
@@ -668,8 +563,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r4, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ r4, r3);
__ Ret();
// Do a quick inline probe of the receiver's dictionary, if it
@@ -681,8 +576,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
+ r3);
__ Ret();
__ bind(&index_name);
@@ -696,16 +591,13 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is in lr.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register scratch = r3;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -721,51 +613,10 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = r3;
- Register scratch2 = r4;
- DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
- DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ NonNegativeSmiTst(key);
- __ b(ne, &slow);
-
- // Get the map of the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
- __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor));
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -773,55 +624,11 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
@@ -882,13 +689,8 @@ static void KeyedStoreGenerateGenericHelper(
__ str(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
__ bind(fast_double);
@@ -903,8 +705,8 @@ static void KeyedStoreGenerateGenericHelper(
// We have to see if the double version of the hole is present. If so
// go to the runtime.
__ add(address, elements,
- Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
- - kHeapObjectTag));
+ Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
+ kHeapObjectTag));
__ ldr(scratch_value,
MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
__ cmp(scratch_value, Operand(kHoleNanUpper32));
@@ -930,25 +732,19 @@ static void KeyedStoreGenerateGenericHelper(
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- r4,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, r4, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -959,11 +755,8 @@ static void KeyedStoreGenerateGenericHelper(
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, r4, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -985,9 +778,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label array, extra, check_if_double_array;
// Register usage.
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(value.is(r0));
@@ -1028,7 +821,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// r0: value.
// r1: key.
// r2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1042,8 +835,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(key, Operand(ip));
__ b(hs, &slow);
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &check_if_double_array);
__ jmp(&fast_object_grow);
@@ -1064,30 +856,29 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(key, Operand(ip));
__ b(hs, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
+ KeyedStoreGenerateGenericHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
+ &slow, kDontCheckMap, kIncrementLength, value,
+ key, receiver, receiver_map, elements_map,
+ elements);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
- DCHECK(ValueRegister().is(r0));
+ DCHECK(StoreDescriptor::ValueRegister().is(r0));
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+ name, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1095,7 +886,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
// Perform tail call to the entry.
ExternalReference ref =
@@ -1106,9 +898,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Register dictionary = r3;
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
@@ -1118,8 +910,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(),
- 1, r4, r5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
__ Ret();
__ bind(&miss);
@@ -1128,18 +919,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
#undef __
@@ -1189,8 +968,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The delta to the start of the map check instruction and the
// condition code uses at the patched jump.
int delta = Assembler::GetCmpImmediateRawImmediate(instr);
- delta +=
- Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
+ delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
// If the delta is 0 the instruction is cmp r0, #0 which also signals that
// nothing was inlined.
if (delta == 0) {
@@ -1198,8 +976,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- address, cmp_instruction_address, delta);
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
+ cmp_instruction_address, delta);
}
Address patch_address =
@@ -1235,8 +1013,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.EmitCondition(eq);
}
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
new file mode 100644
index 0000000000..7bef56e94d
--- /dev/null
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(r0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+ __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ cmp(this->name(), Operand(name));
+ __ b(ne, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(map));
+ __ cmp(map_reg, ip);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; ++i) {
+ __ mov(ip, Operand(receiver_maps->at(i)));
+ __ cmp(scratch1(), ip);
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+ } else {
+ Label next_map;
+ __ b(ne, &next_map);
+ __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
new file mode 100644
index 0000000000..bc8b0fba84
--- /dev/null
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -0,0 +1,175 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register receiver, Register name,
+ // Number of the cache entry, not scaled.
+ Register offset, Register scratch, Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ DCHECK(value_off_addr > key_off_addr);
+ DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+ DCHECK(map_off_addr > key_off_addr);
+ DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ mov(base_addr, Operand(key_offset));
+ __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
+
+ // Check that the key in the entry matches the name.
+ __ ldr(ip, MemOperand(base_addr, 0));
+ __ cmp(name, ip);
+ __ b(ne, &miss);
+
+ // Check the map matches.
+ __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(ip, scratch2);
+ __ b(ne, &miss);
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ // It's a nice optimization if this constant is encodable in the bic insn.
+
+ uint32_t mask = Code::kFlagsNotUsedInLookup;
+ DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
+ __ bic(flags_reg, flags_reg, Operand(mask));
+ __ cmp(flags_reg, Operand(flags));
+ __ b(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Jump to the first instruction in the code stub.
+ __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ DCHECK(sizeof(Entry) == 12);
+
+ // Make sure the flags does not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+ DCHECK(!extra2.is(receiver));
+ DCHECK(!extra2.is(name));
+ DCHECK(!extra2.is(scratch));
+ DCHECK(!extra2.is(extra));
+
+ // Check scratch, extra and extra2 registers are valid.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ add(scratch, scratch, Operand(ip));
+ uint32_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
+ // Mask down the eor argument to the minimum to keep the immediate
+ // ARM-encodable.
+ __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+ // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+ __ and_(scratch, scratch, Operand(mask));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+ uint32_t mask2 = kSecondaryTableSize - 1;
+ __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+ __ and_(scratch, scratch, Operand(mask2));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
new file mode 100644
index 0000000000..58e6099ae6
--- /dev/null
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases. For
+// example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3) is
+// actually
+// used for KeyedStoreCompiler::transition_map(). We should verify which
+// registers are actually scratch registers, and which are important. For now,
+// we use the same assignments as ARM to remain on the safe side.
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, x3, x0, x4, x5};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, value, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(x3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, x3, x4, x5};
+ return registers;
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index b7d43a4771..f7f82bc9a4 100644
--- a/deps/v8/src/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,14 +6,13 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
@@ -50,154 +49,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
__ Bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-// Probe primary or secondary table.
-// If the entry is found in the cache, the generated code jump to the first
-// instruction of the stub in the cache.
-// If there is a miss the code fall trough.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- Register offset,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- // Some code below relies on the fact that the Entry struct contains
- // 3 pointers (name, code, map).
- STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- Label miss;
-
- DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
- // Multiply by 3 because there are 3 fields per entry.
- __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ Mov(scratch, key_offset);
- __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ Ldr(scratch2, MemOperand(scratch));
- __ Cmp(name, scratch2);
- __ B(ne, &miss);
-
- // Check the map matches.
- __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
- __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Cmp(scratch2, scratch3);
- __ B(ne, &miss);
-
- // Get the code entry from the cache.
- __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
- __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
- __ Cmp(scratch2.W(), flags);
- __ B(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ B(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ B(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
- __ Br(scratch);
-
- // Miss: fall through.
- __ Bind(&miss);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Make sure extra and extra2 registers are valid.
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Compute the hash for primary table.
- __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Add(scratch, scratch, extra);
- __ Eor(scratch, scratch, flags);
- // We shift out the last two bits because they are not part of the hash.
- __ Ubfx(scratch, scratch, kCacheIndexShift,
- CountTrailingZeros(kPrimaryTableSize, 64));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- __ Add(scratch, scratch, flags >> kCacheIndexShift);
- __ And(scratch, scratch, kSecondaryTableSize - 1);
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ Bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
Isolate* isolate = masm->isolate();
@@ -349,9 +207,104 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(x0);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, name and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow");
+
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -359,6 +312,55 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+ FrontendHeader(receiver(), name, &miss);
+
+ // Get the value from the cell.
+ Register result = StoreDescriptor::ValueRegister();
+ __ Mov(result, Operand(cell));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ Ret();
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<Name> name) {
+ Label miss;
+
+ ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
+
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
Handle<Name> name) {
if (!label->is_unused()) {
@@ -378,8 +380,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
Label exit;
- DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg,
- scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2,
+ scratch3));
// We don't need scratch3.
scratch3 = NoReg;
@@ -423,8 +425,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
Label do_store;
__ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+ DONT_DO_SMI_CHECK);
__ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ Bind(&do_store);
@@ -454,13 +456,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
@@ -478,8 +475,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
if (index < 0) {
// Set the property straight into the object.
@@ -491,14 +488,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ Mov(storage_reg, value_reg);
}
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -513,14 +505,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ Mov(storage_reg, value_reg);
}
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -531,7 +518,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -595,12 +582,11 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- DCHECK(current.is_null() ||
- (current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound));
+ DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
+ name) == NameDictionary::kNotFound));
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -629,9 +615,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
UseScratchRegisterScope temps(masm());
__ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -721,8 +706,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
if (heap()->InNewSpace(callback->data())) {
__ Mov(scratch3(), Operand(callback));
- __ Ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
+ __ Ldr(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
} else {
__ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
}
@@ -756,91 +741,76 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
- DCHECK(!AreAliased(receiver(), this->name(),
- scratch1(), scratch2(), scratch3()));
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
+ scratch3()));
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
}
- }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ JumpIfRoot(x0, Heap::kNoInterceptorResultSentinelRootIndex,
+ &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ JumpIfRoot(x0,
- Heap::kNoInterceptorResultSentinelRootIndex,
- &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ Bind(&interceptor_failed);
- if (must_preserve_receiver_reg) {
- __ Pop(this->name(), holder_reg, receiver());
- } else {
- __ Pop(this->name(), holder_reg);
- }
- // Leave the internal frame.
+ __ Bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(this->name(), holder_reg, receiver());
+ } else {
+ __ Pop(this->name(), holder_reg);
}
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
- isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ // Leave the internal frame.
}
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -871,285 +841,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ Push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(x0);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- Label miss;
-
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
-
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(all): The so-called scratch registers are significant in some cases. For
-// example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3) is
-// actually
-// used for KeyedStoreCompiler::transition_map(). We should verify which
-// registers are actually scratch registers, and which are important. For now,
-// we use the same assignments as ARM to remain on the safe side.
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, x3, x0, x4, x5 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, value, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(x3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, x3, x4, x5 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
- Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
- Label miss;
- FrontendHeader(receiver(), name, &miss);
-
- // Get the value from the cell.
- Register result = StoreIC::ValueRegister();
- __ Mov(result, Operand(cell));
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (is_configurable) {
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
- __ Ret();
-
- FrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Label try_next;
- __ Cmp(map_reg, Operand(map));
- __ B(ne, &try_next);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ Bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ Bind(&try_next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
-
- ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
-
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; i++) {
- __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
- Label skip;
- __ B(&skip, ne);
- if (!transitioned_maps->at(i).is_null()) {
- // This argument is used by the handler stub. For example, see
- // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
- }
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ Bind(&skip);
- }
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
+} // namespace v8::internal
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, miss;
-
- Register result = x0;
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
-
- __ JumpIfNotSmi(key, &miss);
- __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, x4, key, result, x7, x3, x5, x6);
- __ Ret();
-
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ Bind(&miss);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM64
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index e08fcfd884..76f9c24cf1 100644
--- a/deps/v8/src/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -6,13 +6,10 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -24,8 +21,7 @@ namespace internal {
// "type" holds an instance type on entry and is not clobbered.
// Generated code branch on "global_object" if type is any kind of global
// JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
__ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
@@ -45,12 +41,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(elements, name, scratch1, scratch2));
DCHECK(!AreAliased(result, scratch1, scratch2));
@@ -58,18 +51,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry check that the value is a normal property.
__ Bind(&done);
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
@@ -92,31 +81,24 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ Bind(&done);
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
static const int kTypeAndReadOnlyMask =
@@ -133,8 +115,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ Mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
}
@@ -145,8 +127,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register map_scratch,
Register scratch,
- int interceptor_bit,
- Label* slow) {
+ int interceptor_bit, Label* slow) {
DCHECK(!AreAliased(map_scratch, scratch));
// Check that the object isn't a smi.
@@ -187,14 +168,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Allowed to be the the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register elements_map,
- Register scratch2,
- Register result,
- Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register elements_map, Register scratch2,
+ Register result, Label* not_fast_array,
Label* slow) {
DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
@@ -239,12 +216,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done the hash of the key is left
// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map_scratch,
- Register hash_scratch,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map_scratch, Register hash_scratch,
+ Label* index_string, Label* not_unique) {
DCHECK(!AreAliased(key, map_scratch, hash_scratch));
// Is the key a name?
@@ -256,8 +230,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// Is the string an array index with cached numeric value?
__ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ TestAndBranchIfAllClear(hash_scratch,
- Name::kContainsCachedArrayIndexMask,
+ __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
index_string);
// Is the string internalized? We know it's a string, so a single bit test is
@@ -277,10 +250,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register map,
- Register scratch1,
+ Register object, Register key,
+ Register map, Register scratch1,
Register scratch2,
Label* unmapped_case,
Label* slow_case) {
@@ -293,8 +264,8 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// whether it requires access checks.
__ JumpIfSmi(object, slow_case);
// Check that the object is some kind of JSObject.
- __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
- slow_case, lt);
+ __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
+ lt);
// Check that the key is a positive smi.
__ JumpIfNotSmi(key, slow_case);
@@ -347,47 +318,29 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Register backing_store = parameter_map;
__ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(
- backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
__ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Cmp(key, scratch);
__ B(hs, slow_case);
- __ Add(backing_store,
- backing_store,
+ __ Add(backing_store, backing_store,
FixedArray::kHeaderSize - kHeapObjectTag);
__ SmiUntag(scratch, key);
return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(x1));
- DCHECK(name.is(x2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = x0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ Ldr(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
+ __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), x0, x3, x4);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
@@ -404,55 +357,25 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
// Perform tail call to the entry.
- __ Push(ReceiverRegister(), NameRegister());
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in lr.
- Register result = x0;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
-
- Label miss, unmapped;
-
- Register map_scratch = x0;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
- __ Ldr(result, mapped_location);
- __ Ret();
-
- __ Bind(&unmapped);
- // Parameter map is left in map_scratch when a jump on unmapped is done.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
- __ Ldr(result, unmapped_location);
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
Label slow, notin;
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
DCHECK(receiver.is(x1));
DCHECK(key.is(x2));
DCHECK(value.is(x0));
@@ -464,10 +387,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
Register mapped1 = x4;
Register mapped2 = x5;
- MemOperand mapped =
- GenerateMappedArgumentsLookup(masm, receiver, key, map,
- mapped1, mapped2,
- &notin, &slow);
+ MemOperand mapped = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
Operand mapped_offset = mapped.OffsetAsOperand();
__ Str(value, mapped);
__ Add(x10, mapped.base(), mapped_offset);
@@ -479,7 +400,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// These registers are used by GenerateMappedArgumentsLookup to build a
// MemOperand. They are live for as long as the MemOperand is live.
- Register unmapped1 = map; // This is assumed to alias 'map'.
+ Register unmapped1 = map; // This is assumed to alias 'map'.
Register unmapped2 = x4;
MemOperand unmapped =
GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
@@ -487,8 +408,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ Str(value, unmapped);
__ Add(x10, unmapped.base(), unmapped_offset);
__ Mov(x11, value);
- __ RecordWrite(unmapped.base(), x10, x11,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
__ Ret();
__ Bind(&slow);
GenerateMiss(masm);
@@ -501,7 +422,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@@ -511,66 +432,36 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return x1; }
-const Register LoadIC::NameRegister() { return x2; }
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return x0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return x3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return x1; }
-const Register StoreIC::NameRegister() { return x2; }
-const Register StoreIC::ValueRegister() { return x0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return x3;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- DCHECK(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
+ Register receiver, Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4, Register scratch5,
+ Label* slow) {
+ DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+ scratch5));
Isolate* isolate = masm->isolate();
Label check_number_dictionary;
// If we can load the value, it should be returned in x0.
Register result = x0;
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+ Map::kHasIndexedInterceptor, slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
- GenerateFastArrayLoad(
- masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+ GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
+ result, NULL, slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
+ scratch1, scratch2);
__ Ret();
__ Bind(&check_number_dictionary);
@@ -580,30 +471,26 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
// Check whether we have a number dictionary.
__ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
- __ LoadFromNumberDictionary(
- slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+ __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
+ scratch4, scratch5);
__ Ret();
}
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- DCHECK(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
+ Register receiver, Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4, Register scratch5,
+ Label* slow) {
+ DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+ scratch5));
Isolate* isolate = masm->isolate();
Label probe_dictionary, property_array_property;
// If we can load the value, it should be returned in x0.
Register result = x0;
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+ Map::kHasNamedInterceptor, slow);
// If the receiver is a fast-case object, check the keyed lookup cache.
// Otherwise probe the dictionary.
@@ -678,11 +565,11 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
// Load in-object property.
__ Bind(&load_in_object_property);
__ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
- __ Add(scratch5, scratch5, scratch4); // Index from start of object.
+ __ Add(scratch5, scratch5, scratch4); // Index from start of object.
__ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
__ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ scratch1, scratch2);
__ Ret();
// Load property array property.
@@ -690,8 +577,8 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
__ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
__ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ scratch1, scratch2);
__ Ret();
// Do a quick inline probe of the receiver's dictionary, if it exists.
@@ -701,8 +588,8 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
// Load the property.
GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, scratch1, scratch2);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
+ scratch1, scratch2);
__ Ret();
}
@@ -711,8 +598,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name;
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(x2));
DCHECK(receiver.is(x1));
@@ -724,8 +611,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case.
__ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3);
+ __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
+ x4, x3);
GenerateRuntimeGetProperty(masm);
__ Bind(&check_name);
@@ -744,16 +631,13 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is in lr.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register result = x0;
Register scratch = x3;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -769,51 +653,12 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = x3;
- Register scratch2 = x4;
- DCHECK(!AreAliased(scratch1, scratch2, receiver, key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
- // Get the map of the receiver.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(kSlowCaseBitFieldMask ==
- ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
- __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
- __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateMiss");
// Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -821,50 +666,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSlow");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // Push strict_mode for runtime call.
- __ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x10);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- DCHECK(!AreAliased(
- value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ x10, x11));
Label transition_smi_elements;
Label transition_double_elements;
@@ -914,13 +722,8 @@ static void KeyedStoreGenerateGenericHelper(
// Update write barrier for the elements array address.
__ Mov(x10, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- x10,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Bind(&dont_record_write);
__ Ret();
@@ -943,11 +746,7 @@ static void KeyedStoreGenerateGenericHelper(
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
__ Bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements,
- x10,
- d0,
+ __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
@@ -964,27 +763,19 @@ static void KeyedStoreGenerateGenericHelper(
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&fast_double_without_map_check);
__ Bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, x10, x11, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
@@ -997,12 +788,8 @@ static void KeyedStoreGenerateGenericHelper(
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, x10, x11, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -1022,9 +809,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label fast_double_grow;
Label fast_double;
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
DCHECK(receiver.is(x1));
DCHECK(key.is(x2));
DCHECK(value.is(x0));
@@ -1065,7 +852,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// x0: value
// x1: key
// x2: receiver
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
__ Bind(&extra);
@@ -1101,27 +888,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ B(eq, &extra); // We can handle the case where we are appending 1 element.
__ B(lo, &slow);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
+ KeyedStoreGenerateGenericHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
+ &slow, kDontCheckMap, kIncrementLength, value,
+ key, receiver, receiver_map, elements_map,
+ elements);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
+ x5, x6));
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+ name, x3, x4, x5, x6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1129,7 +916,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
// Tail call to the entry.
ExternalReference ref =
@@ -1140,9 +928,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
- Register value = ValueRegister();
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
Register dictionary = x3;
DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
@@ -1160,39 +948,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
-
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x10);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, name and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -1215,8 +970,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
- Address info_address =
- Assembler::return_address_from_call_start(address);
+ Address info_address = Assembler::return_address_from_call_start(address);
InstructionSequence* patch_info = InstructionSequence::At(info_address);
return patch_info->IsInlineData();
@@ -1231,8 +985,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// instructions which have no side effects, so we can safely execute them.
// The patch information is encoded directly after the call to the helper
// function which is requesting this patch operation.
- Address info_address =
- Assembler::return_address_from_call_start(address);
+ Address info_address = Assembler::return_address_from_call_start(address);
InlineSmiCheckInfo info(info_address);
// Check and decode the patch information instruction.
@@ -1241,8 +994,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
}
if (FLAG_trace_ic) {
- PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
- address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
+ info_address, reinterpret_cast<void*>(info.SmiCheck()));
}
// Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
@@ -1280,8 +1033,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.tbz(smi_reg, 0, branch_imm);
}
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
new file mode 100644
index 0000000000..ffc1069f23
--- /dev/null
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");
+
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(x10);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+ __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ Label try_next;
+ __ Cmp(map_reg, Operand(map));
+ __ B(ne, &try_next);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ Bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ Bind(&try_next);
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+
+ ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
+
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; i++) {
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+ Label skip;
+ __ B(&skip, ne);
+ if (!transitioned_maps->at(i).is_null()) {
+ // This argument is used by the handler stub. For example, see
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ }
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ Bind(&skip);
+ }
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
new file mode 100644
index 0000000000..4d31d49882
--- /dev/null
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -0,0 +1,149 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Probe primary or secondary table.
+// If the entry is found in the cache, the generated code jump to the first
+// instruction of the stub in the cache.
+// If there is a miss the code fall trough.
+//
+// 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register receiver, Register name,
+ Register offset, Register scratch, Register scratch2,
+ Register scratch3) {
+ // Some code below relies on the fact that the Entry struct contains
+ // 3 pointers (name, code, map).
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ Label miss;
+
+ DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+ // Multiply by 3 because there are 3 fields per entry.
+ __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ Mov(scratch, key_offset);
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+
+ // Check that the key in the entry matches the name.
+ __ Ldr(scratch2, MemOperand(scratch));
+ __ Cmp(name, scratch2);
+ __ B(ne, &miss);
+
+ // Check the map matches.
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(scratch2, scratch3);
+ __ B(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+ __ Cmp(scratch2.W(), flags);
+ __ B(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ B(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ B(&miss);
+ }
+#endif
+
+ if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Jump to the first instruction in the code stub.
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(scratch);
+
+ // Miss: fall through.
+ __ Bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure the flags does not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+ // Make sure extra and extra2 registers are valid.
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Compute the hash for primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Add(scratch, scratch, extra);
+ __ Eor(scratch, scratch, flags);
+ // We shift out the last two bits because they are not part of the hash.
+ __ Ubfx(scratch, scratch, kCacheIndexShift,
+ CountTrailingZeros(kPrimaryTableSize, 64));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary table.
+ __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+ __ Add(scratch, scratch, flags >> kCacheIndexShift);
+ __ And(scratch, scratch, kSecondaryTableSize - 1);
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
new file mode 100644
index 0000000000..7ef1b7ed82
--- /dev/null
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/call-optimization.h"
+
+
+namespace v8 {
+namespace internal {
+
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
+ Initialize(function);
+}
+
+
+Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+ Handle<Map> object_map, HolderLookup* holder_lookup) const {
+ DCHECK(is_simple_api_call());
+ if (!object_map->IsJSObjectMap()) {
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+ }
+ if (expected_receiver_type_.is_null() ||
+ expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderIsReceiver;
+ return Handle<JSObject>::null();
+ }
+ while (true) {
+ if (!object_map->prototype()->IsJSObject()) break;
+ Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+ if (!prototype->map()->is_hidden_prototype()) break;
+ object_map = handle(prototype->map());
+ if (expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderFound;
+ return prototype;
+ }
+ }
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+}
+
+
+bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const {
+ DCHECK(is_simple_api_call());
+ if (!receiver->IsJSObject()) return false;
+ Handle<Map> map(JSObject::cast(*receiver)->map());
+ HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = LookupHolderOfExpectedType(map, &holder_lookup);
+ switch (holder_lookup) {
+ case kHolderNotFound:
+ return false;
+ case kHolderIsReceiver:
+ return true;
+ case kHolderFound:
+ if (api_holder.is_identical_to(holder)) return true;
+ // Check if holder is in prototype chain of api_holder.
+ {
+ JSObject* object = *api_holder;
+ while (true) {
+ Object* prototype = object->map()->prototype();
+ if (!prototype->IsJSObject()) return false;
+ if (prototype == *holder) return true;
+ object = JSObject::cast(prototype);
+ }
+ }
+ break;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+ constant_function_ = Handle<JSFunction>::null();
+ is_simple_api_call_ = false;
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+ api_call_info_ = Handle<CallHandlerInfo>::null();
+
+ if (function.is_null() || !function->is_compiled()) return;
+
+ constant_function_ = function;
+ AnalyzePossibleApiFunction(function);
+}
+
+
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+ if (!function->shared()->IsApiFunction()) return;
+ Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
+
+ // Require a C++ callback.
+ if (info->call_code()->IsUndefined()) return;
+ api_call_info_ =
+ Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
+
+ // Accept signatures that either have no restrictions at all or
+ // only have restrictions on the receiver.
+ if (!info->signature()->IsUndefined()) {
+ Handle<SignatureInfo> signature =
+ Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
+ if (!signature->args()->IsUndefined()) return;
+ if (!signature->receiver()->IsUndefined()) {
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(signature->receiver()));
+ }
+ }
+
+ is_simple_api_call_ = true;
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
new file mode 100644
index 0000000000..99494fa3ba
--- /dev/null
+++ b/deps/v8/src/ic/call-optimization.h
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_CALL_OPTIMIZATION_H_
+#define V8_IC_CALL_OPTIMIZATION_H_
+
+#include "src/code-stubs.h"
+#include "src/ic/access-compiler.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+ explicit CallOptimization(Handle<JSFunction> function);
+
+ bool is_constant_call() const { return !constant_function_.is_null(); }
+
+ Handle<JSFunction> constant_function() const {
+ DCHECK(is_constant_call());
+ return constant_function_;
+ }
+
+ bool is_simple_api_call() const { return is_simple_api_call_; }
+
+ Handle<FunctionTemplateInfo> expected_receiver_type() const {
+ DCHECK(is_simple_api_call());
+ return expected_receiver_type_;
+ }
+
+ Handle<CallHandlerInfo> api_call_info() const {
+ DCHECK(is_simple_api_call());
+ return api_call_info_;
+ }
+
+ enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+ Handle<JSObject> LookupHolderOfExpectedType(
+ Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
+
+ // Check if the api holder is between the receiver and the holder.
+ bool IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const;
+
+ private:
+ void Initialize(Handle<JSFunction> function);
+
+ // Determines whether the given function can be called using the
+ // fast api call builtin.
+ void AnalyzePossibleApiFunction(Handle<JSFunction> function);
+
+ Handle<JSFunction> constant_function_;
+ bool is_simple_api_call_;
+ Handle<FunctionTemplateInfo> expected_receiver_type_;
+ Handle<CallHandlerInfo> api_call_info_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_IC_CALL_OPTIMIZATION_H_
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
new file mode 100644
index 0000000000..4ed92ec6bf
--- /dev/null
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -0,0 +1,410 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
+ Handle<Map> stub_holder,
+ Code::Kind kind,
+ CacheHolderFlag cache_holder,
+ Code::StubType type) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
+ Object* probe = stub_holder->FindInCodeCache(*name, flags);
+ if (probe->IsCode()) return handle(Code::cast(probe));
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
+ Handle<Name> name, Handle<HeapType> type) {
+ Isolate* isolate = name->GetIsolate();
+ Handle<Map> receiver_map = IC::TypeToMap(*type, isolate);
+ if (receiver_map->prototype()->IsNull()) {
+ // TODO(jkummerow/verwaest): If there is no prototype and the property
+ // is nonexistent, introduce a builtin to handle this (fast properties
+ // -> return undefined, dictionary properties -> do negative lookup).
+ return Handle<Code>();
+ }
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder_map =
+ IC::GetHandlerCacheHolder(*type, false, isolate, &flag);
+
+ // If no dictionary mode objects are present in the prototype chain, the load
+ // nonexistent IC stub can be shared for all names for a given map and we use
+ // the empty string for the map cache in that case. If there are dictionary
+ // mode objects involved, we need to do negative lookups in the stub and
+ // therefore the stub will be specific to the name.
+ Handle<Name> cache_name =
+ receiver_map->is_dictionary_map()
+ ? name
+ : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
+ Handle<Map> current_map = stub_holder_map;
+ Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
+ while (true) {
+ if (current_map->is_dictionary_map()) cache_name = name;
+ if (current_map->prototype()->IsNull()) break;
+ last = handle(JSObject::cast(current_map->prototype()));
+ current_map = handle(last->map());
+ }
+ // Compile the stub that is either shared for all names or
+ // name specific if there are global objects involved.
+ Handle<Code> handler = PropertyHandlerCompiler::Find(
+ cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
+ if (!handler.is_null()) return handler;
+
+ NamedLoadHandlerCompiler compiler(isolate, type, last, flag);
+ handler = compiler.CompileLoadNonexistent(cache_name);
+ Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
+ return handler;
+}
+
+
+Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
+ return code;
+}
+
+
+void PropertyHandlerCompiler::set_type_for_object(Handle<Object> object) {
+ type_ = IC::CurrentTypeOf(object, isolate());
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
+ Handle<Name> name,
+ Label* miss) {
+ PrototypeCheckType check_type = CHECK_ALL_MAPS;
+ int function_index = -1;
+ if (type()->Is(HeapType::String())) {
+ function_index = Context::STRING_FUNCTION_INDEX;
+ } else if (type()->Is(HeapType::Symbol())) {
+ function_index = Context::SYMBOL_FUNCTION_INDEX;
+ } else if (type()->Is(HeapType::Number())) {
+ function_index = Context::NUMBER_FUNCTION_INDEX;
+ } else if (type()->Is(HeapType::Boolean())) {
+ function_index = Context::BOOLEAN_FUNCTION_INDEX;
+ } else {
+ check_type = SKIP_RECEIVER;
+ }
+
+ if (check_type == CHECK_ALL_MAPS) {
+ GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
+ scratch1(), miss);
+ Object* function = isolate()->native_context()->get(function_index);
+ Object* prototype = JSFunction::cast(function)->instance_prototype();
+ set_type_for_object(handle(prototype, isolate()));
+ object_reg = scratch1();
+ }
+
+ // Check that the maps starting from the prototype haven't changed.
+ return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
+ miss, check_type);
+}
+
+
+// Frontend for store uses the name register. It has to be restored before a
+// miss.
+Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
+ Handle<Name> name,
+ Label* miss) {
+ return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
+ miss, SKIP_RECEIVER);
+}
+
+
+Register PropertyHandlerCompiler::Frontend(Register object_reg,
+ Handle<Name> name) {
+ Label miss;
+ Register reg = FrontendHeader(object_reg, name, &miss);
+ FrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
+ Label* miss,
+ Register scratch1,
+ Register scratch2) {
+ Register holder_reg;
+ Handle<Map> last_map;
+ if (holder().is_null()) {
+ holder_reg = receiver();
+ last_map = IC::TypeToMap(*type(), isolate());
+ // If |type| has null as its prototype, |holder()| is
+ // Handle<JSObject>::null().
+ DCHECK(last_map->prototype() == isolate()->heap()->null_value());
+ } else {
+ holder_reg = FrontendHeader(receiver(), name, miss);
+ last_map = handle(holder()->map());
+ }
+
+ if (last_map->is_dictionary_map()) {
+ if (last_map->IsJSGlobalObjectMap()) {
+ Handle<JSGlobalObject> global =
+ holder().is_null()
+ ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value())
+ : Handle<JSGlobalObject>::cast(holder());
+ GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
+ } else {
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(holder().is_null() ||
+ holder()->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+ GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
+ scratch2);
+ }
+ }
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
+ FieldIndex field) {
+ Register reg = Frontend(receiver(), name);
+ __ Move(receiver(), reg);
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
+ int constant_index) {
+ Register reg = Frontend(receiver(), name);
+ __ Move(receiver(), reg);
+ LoadConstantStub stub(isolate(), constant_index);
+ GenerateTailCall(masm(), stub.GetCode());
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
+ Handle<Name> name) {
+ Label miss;
+ NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
+ GenerateLoadConstant(isolate()->factory()->undefined_value());
+ FrontendFooter(name, &miss);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
+ Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
+ Register reg = Frontend(receiver(), name);
+ GenerateLoadCallback(reg, callback);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
+ Handle<Name> name, const CallOptimization& call_optimization) {
+ DCHECK(call_optimization.is_simple_api_call());
+ Frontend(receiver(), name);
+ Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
+ GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
+ scratch1(), false, 0, NULL);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
+ LookupIterator* it) {
+ // So far the most popular follow ups for interceptor loads are FIELD and
+ // ExecutableAccessorInfo, so inline only them. Other cases may be added
+ // later.
+ bool inline_followup = false;
+ switch (it->state()) {
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ break;
+ case LookupIterator::DATA:
+ inline_followup = it->property_details().type() == FIELD;
+ break;
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> accessors = it->GetAccessors();
+ inline_followup = accessors->IsExecutableAccessorInfo();
+ if (!inline_followup) break;
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ inline_followup = info->getter() != NULL &&
+ ExecutableAccessorInfo::IsCompatibleReceiverType(
+ isolate(), info, type());
+ }
+ }
+
+ Register reg = Frontend(receiver(), it->name());
+ if (inline_followup) {
+ // TODO(368): Compile in the whole chain: all the interceptors in
+ // prototypes and ultimate answer.
+ GenerateLoadInterceptorWithFollowup(it, reg);
+ } else {
+ GenerateLoadInterceptor(reg);
+ }
+ return GetCode(kind(), Code::FAST, it->name());
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
+ LookupIterator* it, Register interceptor_reg) {
+ Handle<JSObject> real_named_property_holder(it->GetHolder<JSObject>());
+
+ set_type_for_object(holder());
+ set_holder(real_named_property_holder);
+ Register reg = Frontend(interceptor_reg, it->name());
+
+ switch (it->state()) {
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::DATA: {
+ DCHECK_EQ(FIELD, it->property_details().type());
+ __ Move(receiver(), reg);
+ LoadFieldStub stub(isolate(), it->GetFieldIndex());
+ GenerateTailCall(masm(), stub.GetCode());
+ break;
+ }
+ case LookupIterator::ACCESSOR:
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
+ DCHECK_NE(NULL, info->getter());
+ GenerateLoadCallback(reg, info);
+ }
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
+ Handle<Name> name, Handle<JSFunction> getter) {
+ Frontend(receiver(), name);
+ GenerateLoadViaGetter(masm(), type(), receiver(), getter);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(verwaest): Cleanup. holder() is actually the receiver.
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
+ Handle<Map> transition, Handle<Name> name) {
+ Label miss, slow;
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1(), &miss);
+
+ // Check that we are allowed to write this.
+ bool is_nonexistent = holder()->map() == transition->GetBackPointer();
+ if (is_nonexistent) {
+ // Find the top object.
+ Handle<JSObject> last;
+ PrototypeIterator iter(isolate(), holder());
+ while (!iter.IsAtEnd()) {
+ last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ iter.Advance();
+ }
+ if (!last.is_null()) set_holder(last);
+ NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
+ } else {
+ FrontendHeader(receiver(), name, &miss);
+ DCHECK(holder()->HasFastProperties());
+ }
+
+ GenerateStoreTransition(transition, name, receiver(), this->name(), value(),
+ scratch1(), scratch2(), scratch3(), &miss, &slow);
+
+ GenerateRestoreName(&miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ GenerateRestoreName(&slow, name);
+ TailCallBuiltin(masm(), SlowBuiltin(kind()));
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
+ Label miss;
+ GenerateStoreField(it, value(), &miss);
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ return GetCode(kind(), Code::FAST, it->name());
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
+ Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
+ Frontend(receiver(), name);
+ GenerateStoreViaSetter(masm(), type(), receiver(), setter);
+
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Frontend(receiver(), name);
+ Register values[] = {value()};
+ GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), true, 1, values);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+
+
+void ElementHandlerCompiler::CompileElementHandlers(
+ MapHandleList* receiver_maps, CodeHandleList* handlers) {
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map = receiver_maps->at(i);
+ Handle<Code> cached_stub;
+
+ if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
+ } else {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (receiver_map->has_indexed_interceptor()) {
+ cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
+ } else if (IsSloppyArgumentsElements(elements_kind)) {
+ cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
+ } else if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind)) {
+ cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind)
+ .GetCode();
+ } else {
+ DCHECK(elements_kind == DICTIONARY_ELEMENTS);
+ cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
+ }
+ }
+
+ handlers->Add(cached_stub);
+ }
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
new file mode 100644
index 0000000000..f033f3f2d9
--- /dev/null
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -0,0 +1,275 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_HANDLER_COMPILER_H_
+#define V8_IC_HANDLER_COMPILER_H_
+
+#include "src/ic/access-compiler.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+
+class CallOptimization;
+
+enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
+
+class PropertyHandlerCompiler : public PropertyAccessCompiler {
+ public:
+ static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
+ CacheHolderFlag cache_holder, Code::StubType type);
+
+ protected:
+ PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
+ Handle<HeapType> type, Handle<JSObject> holder,
+ CacheHolderFlag cache_holder)
+ : PropertyAccessCompiler(isolate, kind, cache_holder),
+ type_(type),
+ holder_(holder) {}
+
+ virtual ~PropertyHandlerCompiler() {}
+
+ virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+ Label* miss) {
+ UNREACHABLE();
+ return receiver();
+ }
+
+ virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
+
+ Register Frontend(Register object_reg, Handle<Name> name);
+ void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
+ Register scratch1, Register scratch2);
+
+ // TODO(verwaest): Make non-static.
+ static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver,
+ Register scratch, bool is_store, int argc,
+ Register* values);
+
+ // Helper function used to check that the dictionary doesn't contain
+ // the property. This function may return false negatives, so miss_label
+ // must always call a backup property check that is complete.
+ // This function is safe to call if the receiver has fast properties.
+ // Name must be unique and receiver must be a heap object.
+ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name, Register r0,
+ Register r1);
+
+ // Generate code to check that a global property cell is empty. Create
+ // the property cell at compilation time if no cell exists for the
+ // property.
+ static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name, Register scratch,
+ Label* miss);
+
+ // Generates code that verifies that the property holder has not changed
+ // (checking maps of objects in the prototype chain for fast and global
+ // objects or doing negative lookup for slow objects, ensures that the
+ // property cells for global objects are still empty) and checks that the map
+ // of the holder has not changed. If necessary the function also generates
+ // code for security check in case of global object holders. Helps to make
+ // sure that the current IC is still valid.
+ //
+ // The scratch and holder registers are always clobbered, but the object
+ // register is only clobbered if it the same as the holder register. The
+ // function returns a register containing the holder - either object_reg or
+ // holder_reg.
+ Register CheckPrototypes(Register object_reg, Register holder_reg,
+ Register scratch1, Register scratch2,
+ Handle<Name> name, Label* miss,
+ PrototypeCheckType check = CHECK_ALL_MAPS);
+
+ Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
+ void set_type_for_object(Handle<Object> object);
+ void set_holder(Handle<JSObject> holder) { holder_ = holder; }
+ Handle<HeapType> type() const { return type_; }
+ Handle<JSObject> holder() const { return holder_; }
+
+ private:
+ Handle<HeapType> type_;
+ Handle<JSObject> holder_;
+};
+
+
+class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+ NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+ Handle<JSObject> holder,
+ CacheHolderFlag cache_holder)
+ : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder,
+ cache_holder) {}
+
+ virtual ~NamedLoadHandlerCompiler() {}
+
+ Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
+
+ Handle<Code> CompileLoadCallback(Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+
+ Handle<Code> CompileLoadCallback(Handle<Name> name,
+ const CallOptimization& call_optimization);
+
+ Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
+
+ // The LookupIterator is used to perform a lookup behind the interceptor. If
+ // the iterator points to a LookupIterator::PROPERTY, its access will be
+ // inlined.
+ Handle<Code> CompileLoadInterceptor(LookupIterator* it);
+
+ Handle<Code> CompileLoadViaGetter(Handle<Name> name,
+ Handle<JSFunction> getter);
+
+ Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
+ bool is_configurable);
+
+ // Static interface
+ static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
+ Handle<HeapType> type);
+
+ static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter);
+
+ static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
+ GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg,
+ Handle<JSFunction>());
+ }
+
+ static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
+
+ // These constants describe the structure of the interceptor arguments on the
+ // stack. The arguments are pushed by the (platform-specific)
+ // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
+ // LoadWithInterceptor.
+ static const int kInterceptorArgsNameIndex = 0;
+ static const int kInterceptorArgsInfoIndex = 1;
+ static const int kInterceptorArgsThisIndex = 2;
+ static const int kInterceptorArgsHolderIndex = 3;
+ static const int kInterceptorArgsLength = 4;
+
+ protected:
+ virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+ Label* miss);
+
+ virtual void FrontendFooter(Handle<Name> name, Label* miss);
+
+ private:
+ Handle<Code> CompileLoadNonexistent(Handle<Name> name);
+ void GenerateLoadConstant(Handle<Object> value);
+ void GenerateLoadCallback(Register reg,
+ Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadCallback(const CallOptimization& call_optimization,
+ Handle<Map> receiver_map);
+ void GenerateLoadInterceptor(Register holder_reg);
+ void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
+ Register holder_reg);
+ void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
+
+ // Generates prototype loading code that uses the objects from the
+ // context we were in when this function was called. If the context
+ // has changed, a jump to miss is performed. This ties the generated
+ // code to a particular context and so must not be used in cases
+ // where the generated code is not allowed to have references to
+ // objects from a context.
+ static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss);
+
+
+ Register scratch4() { return registers_[5]; }
+};
+
+
+class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+ explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+ Handle<JSObject> holder)
+ : PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder,
+ kCacheOnReceiver) {}
+
+ virtual ~NamedStoreHandlerCompiler() {}
+
+ Handle<Code> CompileStoreTransition(Handle<Map> transition,
+ Handle<Name> name);
+ Handle<Code> CompileStoreField(LookupIterator* it);
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
+ const CallOptimization& call_optimization);
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
+ Handle<JSFunction> setter);
+ Handle<Code> CompileStoreInterceptor(Handle<Name> name);
+
+ static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter);
+
+ static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
+ GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg,
+ Handle<JSFunction>());
+ }
+
+ static void GenerateSlow(MacroAssembler* masm);
+
+ protected:
+ virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+ Label* miss);
+
+ virtual void FrontendFooter(Handle<Name> name, Label* miss);
+ void GenerateRestoreName(Label* label, Handle<Name> name);
+
+ private:
+ void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name,
+ Register receiver_reg, Register name_reg,
+ Register value_reg, Register scratch1,
+ Register scratch2, Register scratch3,
+ Label* miss_label, Label* slow);
+
+ void GenerateStoreField(LookupIterator* lookup, Register value_reg,
+ Label* miss_label);
+
+ static Builtins::Name SlowBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::STORE_IC:
+ return Builtins::kStoreIC_Slow;
+ case Code::KEYED_STORE_IC:
+ return Builtins::kKeyedStoreIC_Slow;
+ default:
+ UNREACHABLE();
+ }
+ return Builtins::kStoreIC_Slow;
+ }
+
+ static Register value();
+};
+
+
+class ElementHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+ explicit ElementHandlerCompiler(Isolate* isolate)
+ : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC,
+ Handle<HeapType>::null(),
+ Handle<JSObject>::null(), kCacheOnReceiver) {}
+
+ virtual ~ElementHandlerCompiler() {}
+
+ void CompileElementHandlers(MapHandleList* receiver_maps,
+ CodeHandleList* handlers);
+
+ static void GenerateStoreSlow(MacroAssembler* masm);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_IC_HANDLER_COMPILER_H_
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
new file mode 100644
index 0000000000..9bcbef0b6f
--- /dev/null
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(ebx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, ebx, edi, no_reg};
+ return registers;
+}
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 4db24742fe..fd971541b9 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,9 +6,9 @@
#if V8_TARGET_ARCH_IA32
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
@@ -16,101 +16,34 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register receiver,
- // Number of the cache entry pointer-size scaled.
- Register offset,
- Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- __ bind(&miss);
- } else {
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
}
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
+ __ ret(0);
}
@@ -147,89 +80,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_equal, miss_label);
Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+ properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Assert the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Assert that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- __ sub(offset, name);
- __ add(offset, Immediate(flags));
- __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
- // Probe the secondary table.
- ProbeTable(
- isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Get the global function with the given index.
@@ -259,40 +116,6 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
}
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
// Generate call to api function.
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should will be removed
@@ -307,7 +130,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
+ Register arg = values[argc - 1 - i];
DCHECK(!receiver.is(arg));
DCHECK(!scratch_in.is(arg));
__ push(arg);
@@ -325,16 +148,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ LoadHeapObject(holder, api_holder);
- break;
+ break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
@@ -376,8 +198,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell =
- JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
if (masm->serializer_enabled()) {
@@ -391,9 +212,107 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ __ push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Immediate(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -427,7 +346,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
@@ -489,13 +408,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(eax));
@@ -511,8 +425,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
@@ -528,13 +442,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -552,13 +461,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -568,7 +472,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -600,8 +504,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
@@ -633,10 +537,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
+ NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -661,9 +565,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
if (load_prototype_from_map) {
@@ -763,7 +666,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(scratch3()); // Restore return address.
// Abi for CallApiGetter
- Register getter_address = edx;
+ Register getter_address = ApiGetterDescriptor::function_address();
Address function_address = v8::ToCData<Address>(callback->getter());
__ mov(getter_address, Immediate(function_address));
@@ -779,102 +682,86 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
- }
- }
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- // Clobber registers when generating debug-code to provoke errors.
- __ bind(&interceptor_failed);
- if (FLAG_debug_code) {
- __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
- }
+ if (must_preserve_receiver_reg) {
+ __ push(receiver());
+ }
+ __ push(holder_reg);
+ __ push(this->name());
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
+ // Clobber registers when generating debug-code to provoke errors.
+ __ bind(&interceptor_failed);
+ if (FLAG_debug_code) {
+ __ mov(receiver(), Immediate(bit_cast<int32_t>(kZapValue)));
+ __ mov(holder_reg, Immediate(bit_cast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
+ }
- // Leave the internal frame.
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
}
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
- isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ // Leave the internal frame.
}
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ // Call the runtime system to load the interceptor.
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+ __ push(scratch2()); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -901,55 +788,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- __ push(value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ pop(scratch1()); // remove the return address
@@ -968,100 +806,18 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(scratch1(), receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, ebx, eax, edi, no_reg };
- return registers;
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
}
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(ebx.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, ebx, edi, no_reg };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- Register result = StoreIC::ValueRegister();
+ Register result = StoreDescriptor::ValueRegister();
if (masm()->serializer_enabled()) {
__ mov(result, Immediate(cell));
__ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
@@ -1090,113 +846,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- __ cmp(map_reg, map);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- DCHECK(edx.is(LoadIC::ReceiverRegister()));
- DCHECK(ecx.is(LoadIC::NameRegister()));
- Label slow, miss;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow);
- __ pop(edx);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- __ bind(&miss);
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
-
-
-#undef __
-
-} } // namespace v8::internal
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
new file mode 100644
index 0000000000..ac42f30bf5
--- /dev/null
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -0,0 +1,128 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
+ !ebx.is(StoreDescriptor::NameRegister()) &&
+ !ebx.is(StoreDescriptor::ValueRegister()));
+ __ pop(ebx);
+ __ push(StoreDescriptor::ReceiverRegister());
+ __ push(StoreDescriptor::NameRegister());
+ __ push(StoreDescriptor::ValueRegister());
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+ __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ j(equal, handlers->at(current));
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ __ cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i));
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 62e845eb27..67247d29ee 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -7,9 +7,9 @@
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -21,8 +21,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -42,13 +41,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// name is not internalized, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register r0, Register r1, Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
@@ -66,13 +61,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -99,13 +89,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// call if name is not internalized, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register r0,
- Register r1) {
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register value, Register r0, Register r1) {
// Register use:
//
// elements - holds the property dictionary on entry and is clobbered.
@@ -121,13 +107,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -139,7 +120,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(kTypeAndReadOnlyMask));
__ j(not_zero, miss_label);
@@ -158,10 +140,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
+ Register receiver, Register map,
+ int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
@@ -190,12 +170,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register scratch,
- Register result,
- Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register scratch,
+ Register result, Label* not_fast_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged.
@@ -208,10 +185,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
- __ CheckMap(scratch,
- masm->isolate()->factory()->fixed_array_map(),
- not_fast_array,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(scratch, masm->isolate()->factory()->fixed_array_map(),
+ not_fast_array, DONT_DO_SMI_CHECK);
} else {
__ AssertFastElements(scratch);
}
@@ -233,12 +208,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
@@ -266,13 +238,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
+static Operand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Label* unmapped_case, Label* slow_case) {
Heap* heap = masm->isolate()->heap();
Factory* factory = masm->isolate()->factory();
@@ -302,10 +270,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2, FieldOperand(scratch1,
- key,
- times_half_pointer_size,
- kHeaderSize));
+ __ mov(scratch2,
+ FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
__ cmp(scratch2, factory->the_hole_value());
__ j(equal, unmapped_case);
@@ -314,9 +280,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// map in scratch1).
const int kContextOffset = FixedArray::kHeaderSize;
__ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1,
- scratch2,
- times_half_pointer_size,
+ return FieldOperand(scratch1, scratch2, times_half_pointer_size,
Context::kHeaderSize);
}
@@ -336,9 +300,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, scratch);
__ j(greater_equal, slow_case);
- return FieldOperand(backing_store,
- key,
- times_half_pointer_size,
+ return FieldOperand(backing_store, key, times_half_pointer_size,
FixedArray::kHeaderSize);
}
@@ -348,8 +310,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
@@ -359,8 +321,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, eax, Map::kHasIndexedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
+ Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
@@ -379,9 +341,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a number dictionary.
// ebx: untagged index
// eax: elements
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow,
+ __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
DONT_DO_SMI_CHECK);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
@@ -404,8 +364,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, eax, Map::kHasNamedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
+ &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -492,8 +452,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load property array property.
__ bind(&property_array_property);
__ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ mov(eax,
+ FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -520,17 +480,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is on the stack.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register scratch = ebx;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -546,88 +503,18 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch = eax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Get the map of the receiver.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ and_(scratch, Immediate(kSlowCaseBitFieldMask));
- __ cmp(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(scratch);
- __ push(receiver); // receiver
- __ push(key); // key
- __ push(scratch); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = ExternalReference(
- IC_Utility(kLoadElementWithInterceptor), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, ebx, eax, &notin, &slow);
- __ mov(eax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(eax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// Return address is on the stack.
Label slow, notin;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(edx));
DCHECK(name.is(ecx));
DCHECK(value.is(eax));
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, &notin,
- &slow);
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, name, ebx, edi, &notin, &slow);
__ mov(mapped_location, value);
__ lea(ecx, mapped_location);
__ mov(edx, value);
@@ -648,18 +535,14 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register key = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
DCHECK(value.is(eax));
@@ -713,8 +596,8 @@ static void KeyedStoreGenerateGenericHelper(
__ mov(FixedArrayElementOperand(ebx, key), value);
// Update write barrier for the elements array address.
__ mov(edx, value); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ ret(0);
__ bind(fast_double);
@@ -750,32 +633,24 @@ static void KeyedStoreGenerateGenericHelper(
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ CheckMap(value,
- masm->isolate()->factory()->heap_number_map(),
- &non_double_value,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
+ &non_double_value, DONT_DO_SMI_CHECK);
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- ebx,
- edi,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, ebx, mode, slow);
+ FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
+ edi, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, ebx, mode, slow);
@@ -787,14 +662,11 @@ static void KeyedStoreGenerateGenericHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ ebx, edi, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, ebx, mode, slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+ value, ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -806,8 +678,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
Label array, extra, check_if_double_array;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
@@ -838,7 +710,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -877,42 +749,24 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
&slow, kDontCheckMap, kIncrementLength);
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(name.is(ecx));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, ebx, eax);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = eax;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ mov(dictionary,
- FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), edi, ebx,
- eax);
+ __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), edi, ebx, eax);
__ ret(0);
// Dictionary load failed, go slow (but don't miss).
@@ -922,8 +776,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
DCHECK(!ebx.is(receiver) && !ebx.is(name));
__ pop(ebx);
@@ -968,33 +822,6 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return edx; }
-const Register LoadIC::NameRegister() { return ecx; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return eax;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return ebx;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return edx; }
-const Register StoreIC::NameRegister() { return ecx; }
-const Register StoreIC::ValueRegister() { return eax; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return ebx;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
LoadIC_PushArgs(masm);
@@ -1009,8 +836,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, ReceiverRegister(), NameRegister(),
- ebx, no_reg);
+ masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1018,9 +845,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
@@ -1045,9 +872,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label restore_miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Register dictionary = ebx;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
@@ -1070,40 +897,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
- !ebx.is(ValueRegister()));
- __ pop(ebx);
- __ push(ReceiverRegister());
- __ push(NameRegister());
- __ push(ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
- !ebx.is(ValueRegister()));
- __ pop(ebx);
- __ push(ReceiverRegister());
- __ push(NameRegister());
- __ push(ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
@@ -1115,26 +908,6 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
#undef __
@@ -1186,8 +959,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
+ test_instruction_address, delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
@@ -1195,17 +968,17 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// reverse operation of that.
Address jmp_address = test_instruction_address - delta;
DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc =
+ (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
new file mode 100644
index 0000000000..c1f7c9ad31
--- /dev/null
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -0,0 +1,189 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register name, Register receiver,
+ // Number of the cache entry pointer-size scaled.
+ Register offset, Register extra) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
+
+ if (extra.is_valid()) {
+ // Get the code entry from the cache.
+ __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ if (leave_frame) __ leave();
+
+ // Jump to the first instruction in the code stub.
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
+
+ __ bind(&miss);
+ } else {
+ // Save the offset on the stack.
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Restore offset register.
+ __ mov(offset, Operand(esp, 0));
+
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ if (leave_frame) __ leave();
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
+
+ // Pop at miss.
+ __ bind(&miss);
+ __ pop(offset);
+ }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Label miss;
+
+ // Assert that code is valid. The multiplying code relies on the entry size
+ // being 12.
+ DCHECK(sizeof(Entry) == 12);
+
+ // Assert the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Assert that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+
+ // Assert scratch and extra registers are valid, and extra2/3 are unused.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(extra2.is(no_reg));
+ DCHECK(extra3.is(no_reg));
+
+ Register offset = scratch;
+ scratch = no_reg;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ // We mask out the last two bits because they are not part of the hash and
+ // they are always 01 for maps. Also in the two 'and' instructions below.
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+ // ProbeTable expects the offset to be pointer scaled, which it is, because
+ // the heap object tag size is 2 and the pointer size log 2 is also 2.
+ DCHECK(kCacheIndexShift == kPointerSizeLog2);
+
+ // Probe the primary table.
+ ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
+ offset, extra);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+ __ sub(offset, name);
+ __ add(offset, Immediate(flags));
+ __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
+
+ // Probe the secondary table.
+ ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
+ offset, extra);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
new file mode 100644
index 0000000000..aeae4ba90e
--- /dev/null
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -0,0 +1,447 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic-inl.h"
+#include "src/ic/ic-compiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
+ Handle<Map> stub_holder, Code::Kind kind,
+ ExtraICState extra_state,
+ CacheHolderFlag cache_holder) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(kind, extra_state, cache_holder);
+ Object* probe = stub_holder->FindInCodeCache(*name, flags);
+ if (probe->IsCode()) return handle(Code::cast(probe));
+ return Handle<Code>::null();
+}
+
+
+bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) {
+ for (int i = 0; i < types->length(); ++i) {
+ if (types->at(i)->Is(HeapType::Number())) return true;
+ }
+ return false;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type,
+ Handle<Code> handler,
+ Handle<Name> name,
+ IcCheckType check) {
+ TypeHandleList types(1);
+ CodeHandleList handlers(1);
+ types.Add(type);
+ handlers.Add(handler);
+ Code::StubType stub_type = handler->type();
+ return CompilePolymorphic(&types, &handlers, name, stub_type, check);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeMonomorphic(
+ Code::Kind kind, Handle<Name> name, Handle<HeapType> type,
+ Handle<Code> handler, ExtraICState extra_ic_state) {
+ Isolate* isolate = name->GetIsolate();
+ if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
+ handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
+ name = isolate->factory()->normal_ic_symbol();
+ }
+
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag);
+
+ Handle<Code> ic;
+ // There are multiple string maps that all use the same prototype. That
+ // prototype cannot hold multiple handlers, one for each of the string maps,
+ // for a single name. Hence, turn off caching of the IC.
+ bool can_be_cached = !type->Is(HeapType::String());
+ if (can_be_cached) {
+ ic = Find(name, stub_holder, kind, extra_ic_state, flag);
+ if (!ic.is_null()) return ic;
+ }
+
+#ifdef DEBUG
+ if (kind == Code::KEYED_STORE_IC) {
+ DCHECK(STANDARD_STORE ==
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+ }
+#endif
+
+ PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
+ ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY);
+
+ if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
+ return ic;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
+ Handle<Map> receiver_map) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
+ Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
+
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ Handle<Code> stub;
+ if (receiver_map->has_indexed_interceptor()) {
+ stub = LoadIndexedInterceptorStub(isolate).GetCode();
+ } else if (receiver_map->has_sloppy_arguments_elements()) {
+ stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
+ } else if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ stub = LoadFastElementStub(isolate,
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ } else {
+ stub = LoadDictionaryElementStub(isolate).GetCode();
+ }
+ PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
+ Handle<Code> code =
+ compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
+ isolate->factory()->empty_string(), ELEMENT);
+
+ Map::UpdateCodeCache(receiver_map, name, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, StrictMode strict_mode,
+ KeyedAccessStoreMode store_mode) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
+
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+ Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ Handle<Code> code =
+ compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
+
+ Map::UpdateCodeCache(receiver_map, name, code);
+ DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
+ store_mode);
+ return code;
+}
+
+
+Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
+ ExtraICState state) {
+ Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
+ UnseededNumberDictionary* dictionary =
+ isolate->heap()->non_monomorphic_cache();
+ int entry = dictionary->FindEntry(isolate, flags);
+ DCHECK(entry != -1);
+ Object* code = dictionary->ValueAt(entry);
+ // This might be called during the marking phase of the collector
+ // hence the unchecked cast.
+ return reinterpret_cast<Code*>(code);
+}
+
+
+static void FillCache(Isolate* isolate, Handle<Code> code) {
+ Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
+ isolate->factory()->non_monomorphic_cache(), code->flags(), code);
+ isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
+ InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
+ Handle<UnseededNumberDictionary> cache =
+ isolate->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ PropertyICCompiler compiler(isolate, Code::LOAD_IC);
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileLoadInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileLoadPreMonomorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
+ FillCache(isolate, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
+ InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
+ Handle<UnseededNumberDictionary> cache =
+ isolate->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ PropertyICCompiler compiler(isolate, Code::STORE_IC);
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileStoreInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileStorePreMonomorphic(flags);
+ } else if (ic_state == GENERIC) {
+ code = compiler.CompileStoreGeneric(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileStoreMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
+
+ FillCache(isolate, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
+ CompareNilICStub* stub) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ Handle<String> name(isolate->heap()->empty_string());
+ if (!receiver_map->is_dictionary_map()) {
+ Handle<Code> cached_ic =
+ Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
+ if (!cached_ic.is_null()) return cached_ic;
+ }
+
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate->factory()->meta_map(), receiver_map);
+ Handle<Code> ic = stub->GetCodeCopy(pattern);
+
+ if (!receiver_map->is_dictionary_map()) {
+ Map::UpdateCodeCache(receiver_map, name, ic);
+ }
+
+ return ic;
+}
+
+
+// TODO(verwaest): Change this method so it takes in a TypeHandleList.
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
+ MapHandleList* receiver_maps) {
+ Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
+ Handle<PolymorphicCodeCache> cache =
+ isolate->factory()->polymorphic_code_cache();
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ TypeHandleList types(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); i++) {
+ types.Add(HeapType::Class(receiver_maps->at(i), isolate));
+ }
+ CodeHandleList handlers(receiver_maps->length());
+ ElementHandlerCompiler compiler(isolate);
+ compiler.CompileElementHandlers(receiver_maps, &handlers);
+ PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
+ Handle<Code> code = ic_compiler.CompilePolymorphic(
+ &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL,
+ ELEMENT);
+
+ isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
+
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputePolymorphic(
+ Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers,
+ int valid_types, Handle<Name> name, ExtraICState extra_ic_state) {
+ Handle<Code> handler = handlers->at(0);
+ Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL;
+ DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
+ PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
+ return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
+ StrictMode strict_mode) {
+ Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+ Handle<PolymorphicCodeCache> cache =
+ isolate->factory()->polymorphic_code_cache();
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+ Code::Flags flags =
+ Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ Handle<Code> code =
+ compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
+ LoadIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
+ PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
+ LoadIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
+ StoreIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
+ StoreIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
+ GenerateRuntimeSetProperty(masm(), strict_mode);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
+ StoreIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
+ Handle<Name> name,
+ InlineCacheState state) {
+ Code::Flags flags =
+ Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ IC::RegisterWeakMapDependency(code);
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
+ // Collect MONOMORPHIC stubs for all |receiver_maps|.
+ CodeHandleList handlers(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map(receiver_maps->at(i));
+ Handle<Code> cached_stub;
+ Handle<Map> transitioned_map =
+ receiver_map->FindTransitionedMap(receiver_maps);
+
+ // TODO(mvstanton): The code below is doing pessimistic elements
+ // transitions. I would like to stop doing that and rely on Allocation Site
+ // Tracking to do a better job of ensuring the data types are what they need
+    // to be. Not all the elements are in place yet, so pessimistic elements
+    // transitions are still important for performance.
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!transitioned_map.is_null()) {
+ cached_stub =
+ ElementsTransitionAndStoreStub(isolate(), elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array, store_mode).GetCode();
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
+ } else {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ cached_stub = StoreFastElementStub(isolate(), is_js_array,
+ elements_kind, store_mode).GetCode();
+ } else {
+ cached_stub = StoreElementStub(isolate(), elements_kind).GetCode();
+ }
+ }
+ DCHECK(!cached_stub.is_null());
+ handlers.Add(cached_stub);
+ transitioned_maps.Add(transitioned_map);
+ }
+
+ Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
+ &transitioned_maps);
+ isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
+ return code;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ Handle<Code> stub;
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
+ store_mode).GetCode();
+ } else {
+ stub = StoreElementStub(isolate(), elements_kind).GetCode();
+ }
+
+ __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+
+ TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
+
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string());
+}
+
+
+#undef __
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
new file mode 100644
index 0000000000..3b12157a07
--- /dev/null
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_IC_COMPILER_H_
+#define V8_IC_IC_COMPILER_H_
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+enum IcCheckType { ELEMENT, PROPERTY };
+
+
+class PropertyICCompiler : public PropertyAccessCompiler {
+ public:
+ // Finds the Code object stored in the Heap::non_monomorphic_cache().
+ static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
+ ExtraICState extra_ic_state);
+
+ // Named
+ static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state,
+ ExtraICState extra_state);
+ static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
+ ExtraICState extra_state);
+
+ static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
+ Handle<HeapType> type,
+ Handle<Code> handler,
+ ExtraICState extra_ic_state);
+ static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ ExtraICState extra_ic_state);
+
+ // Keyed
+ static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
+
+ static Handle<Code> ComputeKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, StrictMode strict_mode,
+ KeyedAccessStoreMode store_mode);
+ static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
+ static Handle<Code> ComputeKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
+ StrictMode strict_mode);
+
+ // Compare nil
+ static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
+ CompareNilICStub* stub);
+
+ // Helpers
+ // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
+ // and make the helpers private.
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode);
+
+
+ private:
+ PropertyICCompiler(Isolate* isolate, Code::Kind kind,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ CacheHolderFlag cache_holder = kCacheOnReceiver)
+ : PropertyAccessCompiler(isolate, kind, cache_holder),
+ extra_ic_state_(extra_ic_state) {}
+
+ static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
+ Code::Kind kind,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ CacheHolderFlag cache_holder = kCacheOnReceiver);
+
+ Handle<Code> CompileLoadInitialize(Code::Flags flags);
+ Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileStoreInitialize(Code::Flags flags);
+ Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileStoreGeneric(Code::Flags flags);
+ Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
+
+ Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler,
+ Handle<Name> name, IcCheckType check);
+ Handle<Code> CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers, Handle<Name> name,
+ Code::StubType type, IcCheckType check);
+
+ Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
+ KeyedAccessStoreMode store_mode);
+ Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
+ KeyedAccessStoreMode store_mode);
+ Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps);
+
+ bool IncludesNumberType(TypeHandleList* types);
+
+ Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
+ InlineCacheState state = MONOMORPHIC);
+
+ Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ if (kind() == Code::LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
+ : Logger::LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind() == Code::KEYED_LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_LOAD_IC_TAG
+ : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind() == Code::STORE_IC) {
+ return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
+ : Logger::STORE_POLYMORPHIC_IC_TAG;
+ } else {
+ DCHECK_EQ(Code::KEYED_STORE_IC, kind());
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_STORE_IC_TAG
+ : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
+ }
+ }
+
+ const ExtraICState extra_ic_state_;
+};
+
+
+}
+} // namespace v8::internal
+
+#endif // V8_IC_IC_COMPILER_H_
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index c7954ce13a..e10fb456ce 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_IC_INL_H_
#define V8_IC_INL_H_
-#include "src/ic.h"
+#include "src/ic/ic.h"
#include "src/compiler.h"
#include "src/debug.h"
@@ -27,8 +27,8 @@ Address IC::address() const {
// At least one break point is active perform additional test to ensure that
// break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result,
- raw_constant_pool()))) {
+ if (debug->IsDebugBreak(
+ Assembler::target_address_at(result, raw_constant_pool()))) {
// If the call site is a call to debug break then return the address in
// the original code instead of the address in the running code. This will
// cause the original code to be updated and keeps the breakpoint active in
@@ -93,8 +93,7 @@ Code* IC::GetTargetAtAddress(Address address,
}
-void IC::SetTargetAtAddress(Address address,
- Code* target,
+void IC::SetTargetAtAddress(Address address, Code* target,
ConstantPoolArray* constant_pool) {
DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
Heap* heap = target->GetHeap();
@@ -108,8 +107,8 @@ void IC::SetTargetAtAddress(Address address,
StoreIC::GetStrictMode(target->extra_ic_state()));
}
#endif
- Assembler::set_target_address_at(
- address, constant_pool, target->instruction_start());
+ Assembler::set_target_address_at(address, constant_pool,
+ target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
} else {
@@ -119,6 +118,46 @@ void IC::SetTargetAtAddress(Address address,
}
+void IC::set_target(Code* code) {
+#ifdef VERIFY_HEAP
+ code->VerifyEmbeddedObjectsDependency();
+#endif
+ SetTargetAtAddress(address(), code, constant_pool());
+ target_set_ = true;
+}
+
+
+void LoadIC::set_target(Code* code) {
+ // The contextual mode must be preserved across IC patching.
+ DCHECK(LoadICState::GetContextualMode(code->extra_ic_state()) ==
+ LoadICState::GetContextualMode(target()->extra_ic_state()));
+
+ IC::set_target(code);
+}
+
+
+void StoreIC::set_target(Code* code) {
+ // Strict mode must be preserved across IC patching.
+ DCHECK(GetStrictMode(code->extra_ic_state()) ==
+ GetStrictMode(target()->extra_ic_state()));
+ IC::set_target(code);
+}
+
+
+void KeyedStoreIC::set_target(Code* code) {
+ // Strict mode must be preserved across IC patching.
+ DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode());
+ IC::set_target(code);
+}
+
+
+Code* IC::raw_target() const {
+ return GetTargetAtAddress(address(), constant_pool());
+}
+
+void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
+
+
template <class TypeClass>
JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) {
if (type->Is(TypeClass::Boolean())) {
@@ -169,21 +208,22 @@ Handle<Map> IC::GetICCacheHolder(HeapType* type, Isolate* isolate,
}
-IC::State CallIC::FeedbackToState(Handle<FixedArray> vector,
+IC::State CallIC::FeedbackToState(Handle<TypeFeedbackVector> vector,
Handle<Smi> slot) const {
IC::State state = UNINITIALIZED;
Object* feedback = vector->get(slot->value());
- if (feedback == *TypeFeedbackInfo::MegamorphicSentinel(isolate())) {
+ if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate())) {
state = GENERIC;
} else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
state = MONOMORPHIC;
} else {
- CHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+ CHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()));
}
return state;
}
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_IC_INL_H_
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
new file mode 100644
index 0000000000..4238a7237e
--- /dev/null
+++ b/deps/v8/src/ic/ic-state.cc
@@ -0,0 +1,614 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+
+void ICUtility::Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool) {
+ IC::Clear(isolate, address, constant_pool);
+}
+
+
+CallICState::CallICState(ExtraICState extra_ic_state)
+ : argc_(ArgcBits::decode(extra_ic_state)),
+ call_type_(CallTypeBits::decode(extra_ic_state)) {}
+
+
+ExtraICState CallICState::GetExtraICState() const {
+ ExtraICState extra_ic_state =
+ ArgcBits::encode(argc_) | CallTypeBits::encode(call_type_);
+ return extra_ic_state;
+}
+
+
+OStream& operator<<(OStream& os, const CallICState& s) {
+ return os << "(args(" << s.arg_count() << "), "
+ << (s.call_type() == CallICState::METHOD ? "METHOD" : "FUNCTION")
+ << ", ";
+}
+
+
+BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
+ : isolate_(isolate) {
+ op_ =
+ static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
+ mode_ = OverwriteModeField::decode(extra_ic_state);
+ fixed_right_arg_ =
+ Maybe<int>(HasFixedRightArgField::decode(extra_ic_state),
+ 1 << FixedRightArgValueField::decode(extra_ic_state));
+ left_kind_ = LeftKindField::decode(extra_ic_state);
+ if (fixed_right_arg_.has_value) {
+ right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
+ } else {
+ right_kind_ = RightKindField::decode(extra_ic_state);
+ }
+ result_kind_ = ResultKindField::decode(extra_ic_state);
+ DCHECK_LE(FIRST_TOKEN, op_);
+ DCHECK_LE(op_, LAST_TOKEN);
+}
+
+
+ExtraICState BinaryOpICState::GetExtraICState() const {
+ ExtraICState extra_ic_state =
+ OpField::encode(op_ - FIRST_TOKEN) | OverwriteModeField::encode(mode_) |
+ LeftKindField::encode(left_kind_) |
+ ResultKindField::encode(result_kind_) |
+ HasFixedRightArgField::encode(fixed_right_arg_.has_value);
+ if (fixed_right_arg_.has_value) {
+ extra_ic_state = FixedRightArgValueField::update(
+ extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
+ } else {
+ extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
+ }
+ return extra_ic_state;
+}
+
+
+// static
+void BinaryOpICState::GenerateAheadOfTime(
+ Isolate* isolate, void (*Generate)(Isolate*, const BinaryOpICState&)) {
+// TODO(olivf) We should investigate why adding stubs to the snapshot is so
+// expensive at runtime. When solved we should be able to add most binops to
+// the snapshot instead of hand-picking them.
+// Generated list of commonly used stubs
+#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
+ do { \
+ BinaryOpICState state(isolate, op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = false; \
+ state.right_kind_ = right_kind; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
+#undef GENERATE
+#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
+ do { \
+ BinaryOpICState state(isolate, op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = true; \
+ state.fixed_right_arg_.value = fixed_right_arg_value; \
+ state.right_kind_ = SMI; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
+#undef GENERATE
+}
+
+
+Type* BinaryOpICState::GetResultType(Zone* zone) const {
+ Kind result_kind = result_kind_;
+ if (HasSideEffects()) {
+ result_kind = NONE;
+ } else if (result_kind == GENERIC && op_ == Token::ADD) {
+ return Type::Union(Type::Number(zone), Type::String(zone), zone);
+ } else if (result_kind == NUMBER && op_ == Token::SHR) {
+ return Type::Unsigned32(zone);
+ }
+ DCHECK_NE(GENERIC, result_kind);
+ return KindToType(result_kind, zone);
+}
+
+
+OStream& operator<<(OStream& os, const BinaryOpICState& s) {
+ os << "(" << Token::Name(s.op_);
+ if (s.mode_ == OVERWRITE_LEFT)
+ os << "_ReuseLeft";
+ else if (s.mode_ == OVERWRITE_RIGHT)
+ os << "_ReuseRight";
+ if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
+ os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
+ if (s.fixed_right_arg_.has_value) {
+ os << s.fixed_right_arg_.value;
+ } else {
+ os << BinaryOpICState::KindToString(s.right_kind_);
+ }
+ return os << "->" << BinaryOpICState::KindToString(s.result_kind_) << ")";
+}
+
+
+void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
+ Handle<Object> result) {
+ ExtraICState old_extra_ic_state = GetExtraICState();
+
+ left_kind_ = UpdateKind(left, left_kind_);
+ right_kind_ = UpdateKind(right, right_kind_);
+
+ int32_t fixed_right_arg_value = 0;
+ bool has_fixed_right_arg =
+ op_ == Token::MOD && right->ToInt32(&fixed_right_arg_value) &&
+ fixed_right_arg_value > 0 &&
+ base::bits::IsPowerOfTwo32(fixed_right_arg_value) &&
+ FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
+ (left_kind_ == SMI || left_kind_ == INT32) &&
+ (result_kind_ == NONE || !fixed_right_arg_.has_value);
+ fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg, fixed_right_arg_value);
+
+ result_kind_ = UpdateKind(result, result_kind_);
+
+ if (!Token::IsTruncatingBinaryOp(op_)) {
+ Kind input_kind = Max(left_kind_, right_kind_);
+ if (result_kind_ < input_kind && input_kind <= NUMBER) {
+ result_kind_ = input_kind;
+ }
+ }
+
+ // We don't want to distinguish INT32 and NUMBER for string add (because
+ // NumberToString can't make use of this anyway).
+ if (left_kind_ == STRING && right_kind_ == INT32) {
+ DCHECK_EQ(STRING, result_kind_);
+ DCHECK_EQ(Token::ADD, op_);
+ right_kind_ = NUMBER;
+ } else if (right_kind_ == STRING && left_kind_ == INT32) {
+ DCHECK_EQ(STRING, result_kind_);
+ DCHECK_EQ(Token::ADD, op_);
+ left_kind_ = NUMBER;
+ }
+
+ // Reset overwrite mode unless we can actually make use of it, or may be able
+ // to make use of it at some point in the future.
+ if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
+ result_kind_ > NUMBER) {
+ mode_ = NO_OVERWRITE;
+ }
+
+ if (old_extra_ic_state == GetExtraICState()) {
+ // Tagged operations can lead to non-truncating HChanges
+ if (left->IsUndefined() || left->IsBoolean()) {
+ left_kind_ = GENERIC;
+ } else {
+ DCHECK(right->IsUndefined() || right->IsBoolean());
+ right_kind_ = GENERIC;
+ }
+ }
+}
+
+
+BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
+ Kind kind) const {
+ Kind new_kind = GENERIC;
+ bool is_truncating = Token::IsTruncatingBinaryOp(op());
+ if (object->IsBoolean() && is_truncating) {
+ // Booleans will be automatically truncated by HChange.
+ new_kind = INT32;
+ } else if (object->IsUndefined()) {
+ // Undefined will be automatically truncated by HChange.
+ new_kind = is_truncating ? INT32 : NUMBER;
+ } else if (object->IsSmi()) {
+ new_kind = SMI;
+ } else if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
+ new_kind = IsInt32Double(value) ? INT32 : NUMBER;
+ } else if (object->IsString() && op() == Token::ADD) {
+ new_kind = STRING;
+ }
+ if (new_kind == INT32 && SmiValuesAre32Bits()) {
+ new_kind = NUMBER;
+ }
+ if (kind != NONE && ((new_kind <= NUMBER && kind > NUMBER) ||
+ (new_kind > NUMBER && kind <= NUMBER))) {
+ new_kind = GENERIC;
+ }
+ return Max(kind, new_kind);
+}
+
+
+// static
+const char* BinaryOpICState::KindToString(Kind kind) {
+ switch (kind) {
+ case NONE:
+ return "None";
+ case SMI:
+ return "Smi";
+ case INT32:
+ return "Int32";
+ case NUMBER:
+ return "Number";
+ case STRING:
+ return "String";
+ case GENERIC:
+ return "Generic";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// static
+Type* BinaryOpICState::KindToType(Kind kind, Zone* zone) {
+ switch (kind) {
+ case NONE:
+ return Type::None(zone);
+ case SMI:
+ return Type::SignedSmall(zone);
+ case INT32:
+ return Type::Signed32(zone);
+ case NUMBER:
+ return Type::Number(zone);
+ case STRING:
+ return Type::String(zone);
+ case GENERIC:
+ return Type::Any(zone);
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+const char* CompareICState::GetStateName(State state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return "UNINITIALIZED";
+ case SMI:
+ return "SMI";
+ case NUMBER:
+ return "NUMBER";
+ case INTERNALIZED_STRING:
+ return "INTERNALIZED_STRING";
+ case STRING:
+ return "STRING";
+ case UNIQUE_NAME:
+ return "UNIQUE_NAME";
+ case OBJECT:
+ return "OBJECT";
+ case KNOWN_OBJECT:
+ return "KNOWN_OBJECT";
+ case GENERIC:
+ return "GENERIC";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
+ switch (state) {
+ case UNINITIALIZED:
+ return Type::None(zone);
+ case SMI:
+ return Type::SignedSmall(zone);
+ case NUMBER:
+ return Type::Number(zone);
+ case STRING:
+ return Type::String(zone);
+ case INTERNALIZED_STRING:
+ return Type::InternalizedString(zone);
+ case UNIQUE_NAME:
+ return Type::UniqueName(zone);
+ case OBJECT:
+ return Type::Receiver(zone);
+ case KNOWN_OBJECT:
+ return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+ case GENERIC:
+ return Type::Any(zone);
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+CompareICState::State CompareICState::NewInputState(State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
+ if (value->IsJSObject()) return OBJECT;
+ break;
+ case SMI:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
+ break;
+ case NUMBER:
+ if (value->IsNumber()) return NUMBER;
+ break;
+ case INTERNALIZED_STRING:
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
+ break;
+ case STRING:
+ if (value->IsString()) return STRING;
+ break;
+ case UNIQUE_NAME:
+ if (value->IsUniqueName()) return UNIQUE_NAME;
+ break;
+ case OBJECT:
+ if (value->IsJSObject()) return OBJECT;
+ break;
+ case GENERIC:
+ break;
+ case KNOWN_OBJECT:
+ UNREACHABLE();
+ break;
+ }
+ return GENERIC;
+}
+
+
+// static
+CompareICState::State CompareICState::TargetState(
+ State old_state, State old_left, State old_right, Token::Value op,
+ bool has_inlined_smi_code, Handle<Object> x, Handle<Object> y) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ if (x->IsSmi() && y->IsSmi()) return SMI;
+ if (x->IsNumber() && y->IsNumber()) return NUMBER;
+ if (Token::IsOrderedRelationalCompareOp(op)) {
+ // Ordered comparisons treat undefined as NaN, so the
+ // NUMBER stub will do the right thing.
+ if ((x->IsNumber() && y->IsUndefined()) ||
+ (y->IsNumber() && x->IsUndefined())) {
+ return NUMBER;
+ }
+ }
+ if (x->IsInternalizedString() && y->IsInternalizedString()) {
+ // We compare internalized strings as plain ones if we need to determine
+ // the order in a non-equality compare.
+ return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING;
+ }
+ if (x->IsString() && y->IsString()) return STRING;
+ if (!Token::IsEqualityOp(op)) return GENERIC;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+ if (x->IsJSObject() && y->IsJSObject()) {
+ if (Handle<JSObject>::cast(x)->map() ==
+ Handle<JSObject>::cast(y)->map()) {
+ return KNOWN_OBJECT;
+ } else {
+ return OBJECT;
+ }
+ }
+ return GENERIC;
+ case SMI:
+ return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
+ case INTERNALIZED_STRING:
+ DCHECK(Token::IsEqualityOp(op));
+ if (x->IsString() && y->IsString()) return STRING;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+ return GENERIC;
+ case NUMBER:
+ // If the failure was due to one side changing from smi to heap number,
+ // then keep the state (if other changed at the same time, we will get
+ // a second miss and then go to generic).
+ if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
+ if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
+ return GENERIC;
+ case KNOWN_OBJECT:
+ DCHECK(Token::IsEqualityOp(op));
+ if (x->IsJSObject() && y->IsJSObject()) {
+ return OBJECT;
+ }
+ return GENERIC;
+ case STRING:
+ case UNIQUE_NAME:
+ case OBJECT:
+ case GENERIC:
+ return GENERIC;
+ }
+ UNREACHABLE();
+ return GENERIC; // Make the compiler happy.
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
new file mode 100644
index 0000000000..b84bdb9a70
--- /dev/null
+++ b/deps/v8/src/ic/ic-state.h
@@ -0,0 +1,238 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_STATE_H_
+#define V8_IC_STATE_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+const int kMaxKeyedPolymorphism = 4;
+
+
+class ICUtility : public AllStatic {
+ public:
+ // Clear the inline cache to initial state.
+ static void Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool);
+};
+
+
+class CallICState FINAL BASE_EMBEDDED {
+ public:
+ explicit CallICState(ExtraICState extra_ic_state);
+
+ enum CallType { METHOD, FUNCTION };
+
+ CallICState(int argc, CallType call_type)
+ : argc_(argc), call_type_(call_type) {}
+
+ ExtraICState GetExtraICState() const;
+
+ static void GenerateAheadOfTime(Isolate*,
+ void (*Generate)(Isolate*,
+ const CallICState&));
+
+ int arg_count() const { return argc_; }
+ CallType call_type() const { return call_type_; }
+
+ bool CallAsMethod() const { return call_type_ == METHOD; }
+
+ private:
+ class ArgcBits : public BitField<int, 0, Code::kArgumentsBits> {};
+ class CallTypeBits : public BitField<CallType, Code::kArgumentsBits, 1> {};
+
+ const int argc_;
+ const CallType call_type_;
+};
+
+
+OStream& operator<<(OStream& os, const CallICState& s);
+
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+class BinaryOpICState FINAL BASE_EMBEDDED {
+ public:
+ BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
+
+ BinaryOpICState(Isolate* isolate, Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ left_kind_(NONE),
+ right_kind_(NONE),
+ result_kind_(NONE),
+ isolate_(isolate) {
+ DCHECK_LE(FIRST_TOKEN, op);
+ DCHECK_LE(op, LAST_TOKEN);
+ }
+
+ InlineCacheState GetICState() const {
+ if (Max(left_kind_, right_kind_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::MEGAMORPHIC;
+ }
+ if (Min(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::GENERIC;
+ }
+ return ::v8::internal::MONOMORPHIC;
+ }
+
+ ExtraICState GetExtraICState() const;
+
+ static void GenerateAheadOfTime(Isolate*,
+ void (*Generate)(Isolate*,
+ const BinaryOpICState&));
+
+ bool CanReuseDoubleBox() const {
+ return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
+ ((mode_ == OVERWRITE_LEFT && left_kind_ > SMI &&
+ left_kind_ <= NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_kind_ > SMI &&
+ right_kind_ <= NUMBER));
+ }
+
+ // Returns true if the IC _could_ create allocation mementos.
+ bool CouldCreateAllocationMementos() const {
+ if (left_kind_ == STRING || right_kind_ == STRING) {
+ DCHECK_EQ(Token::ADD, op_);
+ return true;
+ }
+ return false;
+ }
+
+ // Returns true if the IC _should_ create allocation mementos.
+ bool ShouldCreateAllocationMementos() const {
+ return FLAG_allocation_site_pretenuring && CouldCreateAllocationMementos();
+ }
+
+ bool HasSideEffects() const {
+ return Max(left_kind_, right_kind_) == GENERIC;
+ }
+
+ // Returns true if the IC should enable the inline smi code (i.e. if either
+ // parameter may be a smi).
+ bool UseInlinedSmiCode() const {
+ return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
+ }
+
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
+
+ Token::Value op() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+ Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); }
+ Type* GetRightType(Zone* zone) const { return KindToType(right_kind_, zone); }
+ Type* GetResultType(Zone* zone) const;
+
+ void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ friend OStream& operator<<(OStream& os, const BinaryOpICState& s);
+
+ enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+ Kind UpdateKind(Handle<Object> object, Kind kind) const;
+
+ static const char* KindToString(Kind kind);
+ static Type* KindToType(Kind kind, Zone* zone);
+ static bool KindMaybeSmi(Kind kind) {
+ return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
+ }
+
+ // We truncate the last bit of the token.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
+ class OpField : public BitField<int, 0, 4> {};
+ class OverwriteModeField : public BitField<OverwriteMode, 4, 2> {};
+ class ResultKindField : public BitField<Kind, 6, 3> {};
+ class LeftKindField : public BitField<Kind, 9, 3> {};
+ // When fixed right arg is set, we don't need to store the right kind.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgField : public BitField<bool, 12, 1> {};
+ class FixedRightArgValueField : public BitField<int, 13, 4> {};
+ class RightKindField : public BitField<Kind, 13, 3> {};
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ Kind left_kind_;
+ Kind right_kind_;
+ Kind result_kind_;
+ Maybe<int> fixed_right_arg_;
+ Isolate* isolate_;
+};
+
+
+OStream& operator<<(OStream& os, const BinaryOpICState& s);
+
+
+class CompareICState {
+ public:
+ // The type/state lattice is defined by the following inequations:
+ // UNINITIALIZED < ...
+ // ... < GENERIC
+ // SMI < NUMBER
+ // INTERNALIZED_STRING < STRING
+ // KNOWN_OBJECT < OBJECT
+ enum State {
+ UNINITIALIZED,
+ SMI,
+ NUMBER,
+ STRING,
+ INTERNALIZED_STRING,
+ UNIQUE_NAME, // Symbol or InternalizedString
+ OBJECT, // JSObject
+ KNOWN_OBJECT, // JSObject with specific map (faster check)
+ GENERIC
+ };
+
+ static Type* StateToType(Zone* zone, State state,
+ Handle<Map> map = Handle<Map>());
+
+ static State NewInputState(State old_state, Handle<Object> value);
+
+ static const char* GetStateName(CompareICState::State state);
+
+ static State TargetState(State old_state, State old_left, State old_right,
+ Token::Value op, bool has_inlined_smi_code,
+ Handle<Object> x, Handle<Object> y);
+};
+
+
+class LoadICState FINAL BASE_EMBEDDED {
+ public:
+ explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
+
+ explicit LoadICState(ContextualMode mode)
+ : state_(ContextualModeBits::encode(mode)) {}
+
+ ExtraICState GetExtraICState() const { return state_; }
+
+ ContextualMode contextual_mode() const {
+ return ContextualModeBits::decode(state_);
+ }
+
+ static ContextualMode GetContextualMode(ExtraICState state) {
+ return LoadICState(state).contextual_mode();
+ }
+
+ private:
+ class ContextualModeBits : public BitField<ContextualMode, 0, 1> {};
+ STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
+
+ const ExtraICState state_;
+};
+}
+}
+
+#endif // V8_IC_STATE_H_
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic/ic.cc
index db82bf5238..500fa1fb7b 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -7,32 +7,43 @@
#include "src/accessors.h"
#include "src/api.h"
#include "src/arguments.h"
+#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/conversions.h"
#include "src/execution.h"
-#include "src/ic-inl.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic-inl.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
#include "src/prototype.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
- case UNINITIALIZED: return '0';
- case PREMONOMORPHIC: return '.';
- case MONOMORPHIC: return '1';
+ case UNINITIALIZED:
+ return '0';
+ case PREMONOMORPHIC:
+ return '.';
+ case MONOMORPHIC:
+ return '1';
case PROTOTYPE_FAILURE:
return '^';
- case POLYMORPHIC: return 'P';
- case MEGAMORPHIC: return 'N';
- case GENERIC: return 'G';
+ case POLYMORPHIC:
+ return 'P';
+ case MEGAMORPHIC:
+ return 'N';
+ case GENERIC:
+ return 'G';
// We never see the debugger states here, because the state is
// computed from the original code - not the patched code. Let
// these cases fall through to the unreachable code below.
- case DEBUG_STUB: break;
+ case DEBUG_STUB:
+ break;
// Type-vector-based ICs resolve state to one of the above.
case DEFAULT:
break;
@@ -65,7 +76,13 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
#else
-#define TRACE_GENERIC_IC(isolate, type, reason)
+#define TRACE_GENERIC_IC(isolate, type, reason) \
+ do { \
+ if (FLAG_trace_ic) { \
+ PrintF("[%s patching generic stub in ", type); \
+ PrintF("(see below) (%s)]\n", reason); \
+ } \
+ } while (false)
#endif // DEBUG
@@ -122,18 +139,15 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
TraceIC(type, name, old_state, new_state)
IC::IC(FrameDepth depth, Isolate* isolate)
- : isolate_(isolate),
- target_set_(false),
- target_maps_set_(false) {
+ : isolate_(isolate), target_set_(false), target_maps_set_(false) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
- const Address entry =
- Isolate::c_entry_fp(isolate->thread_local_top());
+ const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
Address constant_pool = NULL;
if (FLAG_enable_ool_constant_pool) {
- constant_pool = Memory::Address_at(
- entry + ExitFrameConstants::kConstantPoolOffset);
+ constant_pool =
+ Memory::Address_at(entry + ExitFrameConstants::kConstantPoolOffset);
}
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
@@ -143,8 +157,8 @@ IC::IC(FrameDepth depth, Isolate* isolate)
// find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
if (FLAG_enable_ool_constant_pool) {
- constant_pool = Memory::Address_at(
- fp + StandardFrameConstants::kConstantPoolOffset);
+ constant_pool =
+ Memory::Address_at(fp + StandardFrameConstants::kConstantPoolOffset);
}
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
@@ -202,15 +216,11 @@ Code* IC::GetOriginalCode() const {
}
-static bool HasInterceptorSetter(JSObject* object) {
- return !object->GetNamedInterceptor()->setter()->IsUndefined();
-}
-
-
static void LookupForRead(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
return;
@@ -230,9 +240,9 @@ static void LookupForRead(LookupIterator* it) {
break;
}
return;
- case LookupIterator::PROPERTY:
- if (it->HasProperty()) return; // Yay!
- break;
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::DATA:
+ return;
}
}
}
@@ -278,11 +288,11 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
}
if (receiver->IsGlobalObject()) {
- LookupResult lookup(isolate());
- GlobalObject* global = GlobalObject::cast(*receiver);
- global->LookupOwnRealNamedProperty(name, &lookup);
- if (!lookup.IsFound()) return false;
- PropertyCell* cell = global->GetPropertyCell(&lookup);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.state() == LookupIterator::ACCESS_CHECK) return false;
+ if (!it.IsFound()) return false;
+ Handle<PropertyCell> cell = it.GetPropertyCell();
return cell->type()->IsConstant();
}
@@ -303,7 +313,7 @@ bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
- receiver_type_ = CurrentTypeOf(receiver, isolate());
+ update_receiver_type(receiver);
if (!name->IsString()) return;
if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
if (receiver->IsUndefined() || receiver->IsNull()) return;
@@ -327,22 +337,18 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
}
-MaybeHandle<Object> IC::TypeError(const char* type,
- Handle<Object> object,
+MaybeHandle<Object> IC::TypeError(const char* type, Handle<Object> object,
Handle<Object> key) {
HandleScope scope(isolate());
- Handle<Object> args[2] = { key, object };
- Handle<Object> error = isolate()->factory()->NewTypeError(
- type, HandleVector(args, 2));
- return isolate()->Throw<Object>(error);
+ Handle<Object> args[2] = {key, object};
+ THROW_NEW_ERROR(isolate(), NewTypeError(type, HandleVector(args, 2)), Object);
}
MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<Name> name) {
HandleScope scope(isolate());
- Handle<Object> error = isolate()->factory()->NewReferenceError(
- type, HandleVector(&name, 1));
- return isolate()->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate(), NewReferenceError(type, HandleVector(&name, 1)),
+ Object);
}
@@ -386,8 +392,8 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
State old_state, State new_state,
bool target_remains_ic_stub) {
- Code* host = isolate->
- inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+ Code* host =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
if (host->kind() != Code::FUNCTION) return;
if (FLAG_type_info_threshold > 0 && target_remains_ic_stub &&
@@ -402,8 +408,7 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
info->change_ic_generic_count(generic_delta);
}
if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info =
- TypeFeedbackInfo::cast(host->type_feedback_info());
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
info->change_own_type_change_checksum();
}
host->set_profiler_ticks(0);
@@ -471,7 +476,7 @@ void IC::InvalidateMaps(Code* stub) {
void IC::Clear(Isolate* isolate, Address address,
- ConstantPoolArray* constant_pool) {
+ ConstantPoolArray* constant_pool) {
Code* target = GetTargetAtAddress(address, constant_pool);
// Don't clear debug break inline cache as it will remove the break point.
@@ -497,14 +502,13 @@ void IC::Clear(Isolate* isolate, Address address,
// Clearing these is tricky and does not
// make any performance difference.
return;
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
-void KeyedLoadIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
+void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
@@ -514,17 +518,13 @@ void KeyedLoadIC::Clear(Isolate* isolate,
}
-void CallIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
+void CallIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
// Currently, CallIC doesn't have state changes.
}
-void LoadIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
+void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
@@ -533,9 +533,7 @@ void LoadIC::Clear(Isolate* isolate,
}
-void StoreIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
+void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
@@ -544,29 +542,24 @@ void StoreIC::Clear(Isolate* isolate,
}
-void KeyedStoreIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
+void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
- SetTargetAtAddress(address,
- *pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+ SetTargetAtAddress(
+ address, *pre_monomorphic_stub(
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
constant_pool);
}
-void CompareIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
+void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
- CompareIC::State handler_state;
- Token::Value op;
- ICCompareStub::DecodeKey(target->stub_key(), NULL, NULL, &handler_state, &op);
+ CompareICStub stub(target->stub_key(), isolate);
// Only clear CompareICs that can retain objects.
- if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
+ if (stub.state() != CompareICState::KNOWN_OBJECT) return;
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, stub.op()),
+ constant_pool);
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
@@ -609,10 +602,8 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
}
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::GetElementOrCharAt(isolate(), object, index),
- Object);
+ isolate(), result,
+ Runtime::GetElementOrCharAt(isolate(), object, index), Object);
return result;
}
@@ -622,27 +613,22 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
LookupIterator it(object, name);
LookupForRead(&it);
- // If we did not find a property, check if we need to throw an exception.
- if (!it.IsFound()) {
- if (IsUndeclaredGlobal(object)) {
- return ReferenceError("not_defined", name);
- }
- LOG(isolate(), SuspectReadEvent(*name, *object));
- }
-
- // Update inline cache and stub cache.
- if (use_ic) UpdateCaches(&it, object, name);
+ if (it.IsFound() || !IsUndeclaredGlobal(object)) {
+ // Update inline cache and stub cache.
+ if (use_ic) UpdateCaches(&it);
- // Get the property.
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::GetProperty(&it), Object);
- // If the property is not present, check if we need to throw an exception.
- if (!it.IsFound() && IsUndeclaredGlobal(object)) {
- return ReferenceError("not_defined", name);
+ // Get the property.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
+ Object);
+ if (it.IsFound()) {
+ return result;
+ } else if (!IsUndeclaredGlobal(object)) {
+ LOG(isolate(), SuspectReadEvent(*name, *object));
+ return result;
+ }
}
-
- return result;
+ return ReferenceError("not_defined", name);
}
@@ -683,8 +669,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
// there was a prototoype chain failure. In that case, just overwrite the
// handler.
handler_to_overwrite = i;
- } else if (handler_to_overwrite == -1 &&
- current_type->IsClass() &&
+ } else if (handler_to_overwrite == -1 && current_type->IsClass() &&
type->IsClass() &&
IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
*type->AsClass()->Map())) {
@@ -693,7 +678,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
}
int number_of_valid_types =
- number_of_types - deprecated_types - (handler_to_overwrite != -1);
+ number_of_types - deprecated_types - (handler_to_overwrite != -1);
if (number_of_valid_types >= 4) return false;
if (number_of_types == 0) return false;
@@ -726,8 +711,8 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
return object->IsJSGlobalObject()
- ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
- : HeapType::NowOf(object, isolate);
+ ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
+ : HeapType::NowOf(object, isolate);
}
@@ -758,12 +743,11 @@ typename T::TypeHandle IC::MapToType(Handle<Map> map,
}
-template
-Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
+template Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
-template
-Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map, Isolate* region);
+template Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map,
+ Isolate* region);
void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
@@ -789,12 +773,12 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
if (source_map == NULL) return true;
if (target_map == NULL) return false;
ElementsKind target_elements_kind = target_map->elements_kind();
- bool more_general_transition =
- IsMoreGeneralElementsKindTransition(
- source_map->elements_kind(), target_elements_kind);
- Map* transitioned_map = more_general_transition
- ? source_map->LookupElementsTransitionMap(target_elements_kind)
- : NULL;
+ bool more_general_transition = IsMoreGeneralElementsKindTransition(
+ source_map->elements_kind(), target_elements_kind);
+ Map* transitioned_map =
+ more_general_transition
+ ? source_map->LookupElementsTransitionMap(target_elements_kind)
+ : NULL;
return transitioned_map == target_map;
}
@@ -814,7 +798,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
CopyICToMegamorphicCache(name);
}
set_target(*megamorphic_stub());
- // Fall through.
+ // Fall through.
case MEGAMORPHIC:
UpdateMegamorphicCache(*receiver_type(), *name, *code);
break;
@@ -836,8 +820,8 @@ Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
Handle<Code> LoadIC::megamorphic_stub() {
if (kind() == Code::LOAD_IC) {
- return PropertyICCompiler::ComputeLoad(isolate(), MEGAMORPHIC,
- extra_ic_state());
+ MegamorphicLoadStub stub(isolate(), LoadICState(extra_ic_state()));
+ return stub.GetCode();
} else {
DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
return KeyedLoadIC::generic_stub(isolate());
@@ -872,14 +856,12 @@ Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
}
-void LoadIC::UpdateCaches(LookupIterator* lookup, Handle<Object> object,
- Handle<Name> name) {
+void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
+ // This is the first time we execute this inline cache. Set the target to
+ // the pre monomorphic stub to delay setting the monomorphic state.
set_target(*pre_monomorphic_stub());
- TRACE_IC("LoadIC", name);
+ TRACE_IC("LoadIC", lookup->name());
return;
}
@@ -889,7 +871,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup, Handle<Object> object,
code = slow_stub();
} else if (!lookup->IsFound()) {
if (kind() == Code::LOAD_IC) {
- code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(name,
+ code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
receiver_type());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
if (code.is_null()) code = slow_stub();
@@ -897,11 +879,11 @@ void LoadIC::UpdateCaches(LookupIterator* lookup, Handle<Object> object,
code = slow_stub();
}
} else {
- code = ComputeHandler(lookup, object, name);
+ code = ComputeHandler(lookup);
}
- PatchCache(name, code);
- TRACE_IC("LoadIC", name);
+ PatchCache(lookup->name(), code);
+ TRACE_IC("LoadIC", lookup->name());
}
@@ -912,61 +894,16 @@ void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
}
-Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> object,
- Handle<Name> name, Handle<Object> value) {
+Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
bool receiver_is_holder =
- object.is_identical_to(lookup->GetHolder<JSObject>());
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
- *receiver_type(), receiver_is_holder, isolate(), &flag);
-
- Handle<Code> code = PropertyHandlerCompiler::Find(
- name, stub_holder_map, kind(), flag,
- lookup->holder_map()->is_dictionary_map() ? Code::NORMAL : Code::FAST);
- // Use the cached value if it exists, and if it is different from the
- // handler that just missed.
- if (!code.is_null()) {
- if (!maybe_handler_.is_null() &&
- !maybe_handler_.ToHandleChecked().is_identical_to(code)) {
- return code;
- }
- if (maybe_handler_.is_null()) {
- // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
- // In MEGAMORPHIC case, check if the handler in the megamorphic stub
- // cache (which just missed) is different from the cached handler.
- if (state() == MEGAMORPHIC && object->IsHeapObject()) {
- Map* map = Handle<HeapObject>::cast(object)->map();
- Code* megamorphic_cached_code =
- isolate()->stub_cache()->Get(*name, map, code->flags());
- if (megamorphic_cached_code != *code) return code;
- } else {
- return code;
- }
- }
- }
-
- code = CompileHandler(lookup, object, name, value, flag);
- DCHECK(code->is_handler());
-
- if (code->type() != Code::NORMAL) {
- Map::UpdateCodeCache(stub_holder_map, name, code);
- }
-
- return code;
-}
-
-
-Handle<Code> IC::ComputeStoreHandler(LookupResult* lookup,
- Handle<Object> object, Handle<Name> name,
- Handle<Object> value) {
- bool receiver_is_holder = lookup->ReceiverIsHolder(object);
+ lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
CacheHolderFlag flag;
Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
*receiver_type(), receiver_is_holder, isolate(), &flag);
Handle<Code> code = PropertyHandlerCompiler::Find(
- name, stub_holder_map, handler_kind(), flag,
- lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL);
+ lookup->name(), stub_holder_map, kind(), flag,
+ lookup->is_dictionary_holder() ? Code::NORMAL : Code::FAST);
// Use the cached value if it exists, and if it is different from the
// handler that just missed.
if (!code.is_null()) {
@@ -978,10 +915,10 @@ Handle<Code> IC::ComputeStoreHandler(LookupResult* lookup,
// maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
// In MEGAMORPHIC case, check if the handler in the megamorphic stub
// cache (which just missed) is different from the cached handler.
- if (state() == MEGAMORPHIC && object->IsHeapObject()) {
- Map* map = Handle<HeapObject>::cast(object)->map();
+ if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
+ Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
Code* megamorphic_cached_code =
- isolate()->stub_cache()->Get(*name, map, code->flags());
+ isolate()->stub_cache()->Get(*lookup->name(), map, code->flags());
if (megamorphic_cached_code != *code) return code;
} else {
return code;
@@ -989,11 +926,18 @@ Handle<Code> IC::ComputeStoreHandler(LookupResult* lookup,
}
}
- code = CompileStoreHandler(lookup, object, name, value, flag);
+ code = CompileHandler(lookup, value, flag);
DCHECK(code->is_handler());
- if (code->type() != Code::NORMAL) {
- Map::UpdateCodeCache(stub_holder_map, name, code);
+ // TODO(mvstanton): we'd only like to cache code on the map when it's
+ // custom code compiled for this map, otherwise it's already cached in
+ // the global code cache.
+ // We are also guarding against installing code with flags that don't
+ // match the desired CacheHolderFlag computed above, which would lead to
+ // invalid lookups later.
+ if (code->type() != Code::NORMAL &&
+ Code::ExtractCacheHolderFromFlags(code->flags()) == flag) {
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
}
return code;
@@ -1001,26 +945,28 @@ Handle<Code> IC::ComputeStoreHandler(LookupResult* lookup,
Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> object, Handle<Name> name,
Handle<Object> unused,
CacheHolderFlag cache_holder) {
- if (object->IsString() &&
- Name::Equals(isolate()->factory()->length_string(), name)) {
+ Handle<Object> receiver = lookup->GetReceiver();
+ if (receiver->IsString() &&
+ Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
return SimpleFieldLoad(index);
}
- if (object->IsStringWrapper() &&
- Name::Equals(isolate()->factory()->length_string(), name)) {
+ if (receiver->IsStringWrapper() &&
+ Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
StringLengthStub string_length_stub(isolate());
return string_length_stub.GetCode();
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() &&
- Name::Equals(isolate()->factory()->prototype_string(), name) &&
- Handle<JSFunction>::cast(object)->should_have_prototype() &&
- !Handle<JSFunction>::cast(object)->map()->has_non_instance_prototype()) {
+ if (receiver->IsJSFunction() &&
+ Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
+ Handle<JSFunction>::cast(receiver)->should_have_prototype() &&
+ !Handle<JSFunction>::cast(receiver)
+ ->map()
+ ->has_non_instance_prototype()) {
Handle<Code> stub;
FunctionPrototypeStub function_prototype_stub(isolate());
return function_prototype_stub.GetCode();
@@ -1028,117 +974,130 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
Handle<HeapType> type = receiver_type();
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- bool receiver_is_holder = object.is_identical_to(holder);
- // -------------- Interceptors --------------
- if (lookup->state() == LookupIterator::INTERCEPTOR) {
- DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- return compiler.CompileLoadInterceptor(name);
- }
- DCHECK(lookup->state() == LookupIterator::PROPERTY);
-
- // -------------- Accessors --------------
- if (lookup->property_kind() == LookupIterator::ACCESSOR) {
- // Use simple field loads for some well-known callback properties.
- if (receiver_is_holder) {
- DCHECK(object->IsJSObject());
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- int object_offset;
- if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, name,
- &object_offset)) {
- FieldIndex index =
- FieldIndex::ForInObjectOffset(object_offset, receiver->map());
- return SimpleFieldLoad(index);
- }
- }
-
- Handle<Object> accessors = lookup->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- if (v8::ToCData<Address>(info->getter()) == 0) return slow_stub();
- if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
- type)) {
- return slow_stub();
- }
- if (!holder->HasFastProperties()) return slow_stub();
+ bool receiver_is_holder = receiver.is_identical_to(holder);
+ switch (lookup->state()) {
+ case LookupIterator::INTERCEPTOR: {
+ DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
cache_holder);
- return compiler.CompileLoadCallback(name, info);
+ // Perform a lookup behind the interceptor. Copy the LookupIterator since
+ // the original iterator will be used to fetch the value.
+ LookupIterator it = *lookup;
+ it.Next();
+ LookupForRead(&it);
+ return compiler.CompileLoadInterceptor(&it);
}
- if (accessors->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
- isolate());
- if (!getter->IsJSFunction()) return slow_stub();
- if (!holder->HasFastProperties()) return slow_stub();
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (!object->IsJSObject() && !function->IsBuiltin() &&
- function->shared()->strict_mode() == SLOPPY) {
- // Calling sloppy non-builtins with a value as the receiver
- // requires boxing.
- return slow_stub();
+
+ case LookupIterator::ACCESSOR: {
+ // Use simple field loads for some well-known callback properties.
+ if (receiver_is_holder) {
+ DCHECK(receiver->IsJSObject());
+ Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
+ int object_offset;
+ if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, lookup->name(),
+ &object_offset)) {
+ FieldIndex index =
+ FieldIndex::ForInObjectOffset(object_offset, js_receiver->map());
+ return SimpleFieldLoad(index);
+ }
}
- CallOptimization call_optimization(function);
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(object, holder)) {
- return compiler.CompileLoadCallback(name, call_optimization);
+
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (v8::ToCData<Address>(info->getter()) == 0) break;
+ if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
+ type)) {
+ break;
+ }
+ if (!holder->HasFastProperties()) break;
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ return compiler.CompileLoadCallback(lookup->name(), info);
}
- return compiler.CompileLoadViaGetter(name, function);
+ if (accessors->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) break;
+ if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ if (!receiver->IsJSObject() && !function->IsBuiltin() &&
+ function->shared()->strict_mode() == SLOPPY) {
+ // Calling sloppy non-builtins with a value as the receiver
+ // requires boxing.
+ break;
+ }
+ CallOptimization call_optimization(function);
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ return compiler.CompileLoadCallback(lookup->name(),
+ call_optimization);
+ }
+ return compiler.CompileLoadViaGetter(lookup->name(), function);
+ }
+ // TODO(dcarney): Handle correctly.
+ DCHECK(accessors->IsDeclaredAccessorInfo());
+ break;
}
- // TODO(dcarney): Handle correctly.
- DCHECK(accessors->IsDeclaredAccessorInfo());
- return slow_stub();
- }
- // -------------- Dictionary properties --------------
- DCHECK(lookup->property_kind() == LookupIterator::DATA);
- if (lookup->property_encoding() == LookupIterator::DICTIONARY) {
- if (kind() != Code::LOAD_IC) return slow_stub();
- if (holder->IsGlobalObject()) {
+ case LookupIterator::DATA: {
+ if (lookup->is_dictionary_holder()) {
+ if (kind() != Code::LOAD_IC) break;
+ if (holder->IsGlobalObject()) {
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ Handle<PropertyCell> cell = lookup->GetPropertyCell();
+ Handle<Code> code = compiler.CompileLoadGlobal(
+ cell, lookup->name(), lookup->IsConfigurable());
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder_map = GetHandlerCacheHolder(
+ *type, receiver_is_holder, isolate(), &flag);
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+ return code;
+ }
+ // There is only one shared stub for loading normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the object for the stub to be
+ // applicable.
+ if (!receiver_is_holder) break;
+ return isolate()->builtins()->LoadIC_Normal();
+ }
+
+ // -------------- Fields --------------
+ if (lookup->property_details().type() == FIELD) {
+ FieldIndex field = lookup->GetFieldIndex();
+ if (receiver_is_holder) {
+ return SimpleFieldLoad(field);
+ }
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ return compiler.CompileLoadField(lookup->name(), field);
+ }
+
+ // -------------- Constant properties --------------
+ DCHECK(lookup->property_details().type() == CONSTANT);
+ if (receiver_is_holder) {
+ LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
+ return stub.GetCode();
+ }
NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
cache_holder);
- Handle<PropertyCell> cell = lookup->GetPropertyCell();
- Handle<Code> code =
- compiler.CompileLoadGlobal(cell, name, lookup->IsConfigurable());
- // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map =
- GetHandlerCacheHolder(*type, receiver_is_holder, isolate(), &flag);
- Map::UpdateCodeCache(stub_holder_map, name, code);
- return code;
- }
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the object for the stub to be
- // applicable.
- if (!receiver_is_holder) return slow_stub();
- return isolate()->builtins()->LoadIC_Normal();
- }
-
- // -------------- Fields --------------
- DCHECK(lookup->property_encoding() == LookupIterator::DESCRIPTOR);
- if (lookup->property_details().type() == FIELD) {
- FieldIndex field = lookup->GetFieldIndex();
- if (receiver_is_holder) {
- return SimpleFieldLoad(field);
+ return compiler.CompileLoadConstant(lookup->name(),
+ lookup->GetConstantIndex());
}
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- return compiler.CompileLoadField(name, field);
- }
- // -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == CONSTANT);
- if (receiver_is_holder) {
- LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
- return stub.GetCode();
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
}
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- return compiler.CompileLoadConstant(name, lookup->GetConstantIndex());
+
+ return slow_stub();
}
@@ -1163,14 +1122,6 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
- // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
- // via megamorphic stubs, since they don't have a map in their relocation info
- // and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return generic_stub();
- }
-
Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
if (target().is_identical_to(string_stub())) {
@@ -1189,10 +1140,9 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
// monomorphic. If this optimistic assumption is not true, the IC will
// miss again and it will become polymorphic and support both the
// untransitioned and transitioned maps.
- if (state() == MONOMORPHIC &&
- IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind())) {
+ if (state() == MONOMORPHIC && IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind())) {
return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
}
@@ -1203,14 +1153,14 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
return generic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
return generic_stub();
}
@@ -1223,9 +1173,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::GetObjectProperty(isolate(), object, key),
+ isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
Object);
return result;
}
@@ -1238,23 +1186,15 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString() || key->IsSymbol()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- load_handle,
- LoadIC::Load(object, Handle<Name>::cast(key)),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
+ LoadIC::Load(object, Handle<Name>::cast(key)),
+ Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
if (object->IsString() && key->IsNumber()) {
if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map()) {
- stub = sloppy_arguments_stub();
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (!Object::ToSmi(isolate(), key).is_null() &&
- (!target().is_identical_to(sloppy_arguments_stub()))) {
+ if (!Object::ToSmi(isolate(), key).is_null()) {
stub = LoadElementStub(receiver);
}
}
@@ -1271,84 +1211,73 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (!load_handle.is_null()) return load_handle;
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::GetObjectProperty(isolate(), object, key),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Runtime::GetObjectProperty(isolate(), object, key),
+ Object);
return result;
}
-static bool LookupForWrite(Handle<Object> object, Handle<Name> name,
- Handle<Object> value, LookupResult* lookup, IC* ic) {
+bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
// Disable ICs for non-JSObjects for now.
- if (!object->IsJSObject()) return false;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSObject()) return false;
+ DCHECK(!Handle<JSObject>::cast(receiver)->map()->is_deprecated());
- Handle<JSObject> holder = receiver;
- receiver->Lookup(name, lookup);
- if (lookup->IsFound()) {
- if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
- receiver->LookupOwnRealNamedProperty(name, lookup);
- if (!lookup->IsFound()) return false;
- }
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return false;
+ case LookupIterator::INTERCEPTOR: {
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ InterceptorInfo* info = holder->GetNamedInterceptor();
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ if (!info->setter()->IsUndefined()) return true;
+ } else if (!info->getter()->IsUndefined() ||
+ !info->query()->IsUndefined()) {
+ return false;
+ }
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ if (it->GetHolder<JSObject>()->IsAccessCheckNeeded()) return false;
+ break;
+ case LookupIterator::ACCESSOR:
+ return !it->IsReadOnly();
+ case LookupIterator::DATA: {
+ if (it->IsReadOnly()) return false;
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (receiver.is_identical_to(holder)) {
+ it->PrepareForDataProperty(value);
+ // The previous receiver map might just have been deprecated,
+ // so reload it.
+ update_receiver_type(receiver);
+ return true;
+ }
- if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
- if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
- if (lookup->IsPropertyCallbacks()) return true;
- // JSGlobalProxy either stores on the global object in the prototype, or
- // goes into the runtime if access checks are needed, so this is always
- // safe.
- if (receiver->IsJSGlobalProxy()) {
- PrototypeIterator iter(lookup->isolate(), receiver);
- return lookup->holder() == *PrototypeIterator::GetCurrent(iter);
+ // Receiver != holder.
+ PrototypeIterator iter(it->isolate(), receiver);
+ if (receiver->IsJSGlobalProxy()) {
+ return it->GetHolder<Object>().is_identical_to(
+ PrototypeIterator::GetCurrent(iter));
+ }
+
+ it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+ return it->IsCacheableTransition();
+ }
}
- // Currently normal holders in the prototype chain are not supported. They
- // would require a runtime positive lookup and verification that the details
- // have not changed.
- if (lookup->IsInterceptor() || lookup->IsNormal()) return false;
- holder = Handle<JSObject>(lookup->holder(), lookup->isolate());
- }
-
- // While normally LookupTransition gets passed the receiver, in this case we
- // pass the holder of the property that we overwrite. This keeps the holder in
- // the LookupResult intact so we can later use it to generate a prototype
- // chain check. This avoids a double lookup, but requires us to pass in the
- // receiver when trying to fetch extra information from the transition.
- receiver->map()->LookupTransition(*holder, *name, lookup);
- if (!lookup->IsTransition() || lookup->IsReadOnly()) return false;
-
- // If the value that's being stored does not fit in the field that the
- // instance would transition to, create a new transition that fits the value.
- // This has to be done before generating the IC, since that IC will embed the
- // transition target.
- // Ensure the instance and its map were migrated before trying to update the
- // transition target.
- DCHECK(!receiver->map()->is_deprecated());
- if (!lookup->CanHoldValue(value)) {
- Handle<Map> target(lookup->GetTransitionTarget());
- Representation field_representation = value->OptimalRepresentation();
- Handle<HeapType> field_type = value->OptimalType(
- lookup->isolate(), field_representation);
- Map::GeneralizeRepresentation(
- target, target->LastAdded(),
- field_representation, field_type, FORCE_FIELD);
- // Lookup the transition again since the transition tree may have changed
- // entirely by the migration above.
- receiver->map()->LookupTransition(*holder, *name, lookup);
- if (!lookup->IsTransition()) return false;
- if (!ic->IsNameCompatibleWithPrototypeFailure(name)) return false;
- ic->MarkPrototypeFailure(name);
- return true;
}
- return true;
+ it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+ return it->IsCacheableTransition();
}
-MaybeHandle<Object> StoreIC::Store(Handle<Object> object,
- Handle<Name> name,
+MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
@@ -1377,8 +1306,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
+ isolate(), result,
JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
Object);
return value;
@@ -1395,50 +1323,21 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object,
return result;
}
- LookupResult lookup(isolate());
- bool can_store = LookupForWrite(object, name, value, &lookup, this);
- if (!can_store &&
- strict_mode() == STRICT &&
- !(lookup.IsProperty() && lookup.IsReadOnly()) &&
- object->IsGlobalObject()) {
- // Strict mode doesn't allow setting non-existent global property.
- return ReferenceError("not_defined", name);
- }
- if (FLAG_use_ic) {
- if (state() == UNINITIALIZED) {
- Handle<Code> stub = pre_monomorphic_stub();
- set_target(*stub);
- TRACE_IC("StoreIC", name);
- } else if (can_store) {
- UpdateCaches(&lookup, Handle<JSObject>::cast(object), name, value);
- } else if (lookup.IsNormal() ||
- (lookup.IsField() && lookup.CanHoldValue(value))) {
- Handle<Code> stub = generic_stub();
- set_target(*stub);
- }
- }
+ LookupIterator it(object, name);
+ if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
// Set the property.
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Object::SetProperty(object, name, value, strict_mode(), store_mode),
- Object);
+ Object::SetProperty(&it, value, strict_mode(), store_mode), Object);
return result;
}
-OStream& operator<<(OStream& os, const CallIC::State& s) {
- return os << "(args(" << s.arg_count() << "), "
- << (s.call_type() == CallIC::METHOD ? "METHOD" : "FUNCTION")
- << ", ";
-}
-
-
-Handle<Code> CallIC::initialize_stub(Isolate* isolate,
- int argc,
- CallType call_type) {
- CallICStub stub(isolate, State(argc, call_type));
+Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
+ CallICState::CallType call_type) {
+ CallICStub stub(isolate, CallICState(argc, call_type));
Handle<Code> code = stub.GetCode();
return code;
}
@@ -1454,13 +1353,42 @@ Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
Handle<Code> StoreIC::megamorphic_stub() {
- return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
- extra_ic_state());
+ if (kind() == Code::STORE_IC) {
+ return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
+ extra_ic_state());
+ } else {
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ if (strict_mode() == STRICT) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
+ }
}
Handle<Code> StoreIC::generic_stub() const {
- return PropertyICCompiler::ComputeStore(isolate(), GENERIC, extra_ic_state());
+ if (kind() == Code::STORE_IC) {
+ return PropertyICCompiler::ComputeStore(isolate(), GENERIC,
+ extra_ic_state());
+ } else {
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ if (strict_mode() == STRICT) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
+ }
+}
+
+
+Handle<Code> StoreIC::slow_stub() const {
+ if (kind() == Code::STORE_IC) {
+ return isolate()->builtins()->StoreIC_Slow();
+ } else {
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ return isolate()->builtins()->KeyedStoreIC_Slow();
+ }
}
@@ -1471,134 +1399,146 @@ Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
}
-void StoreIC::UpdateCaches(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<Name> name,
- Handle<Object> value) {
- DCHECK(lookup->IsFound());
-
- // These are not cacheable, so we never see such LookupResults here.
- DCHECK(!lookup->IsHandler());
+void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
+ if (state() == UNINITIALIZED) {
+ // This is the first time we execute this inline cache. Set the target to
+ // the pre monomorphic stub to delay setting the monomorphic state.
+ set_target(*pre_monomorphic_stub());
+ TRACE_IC("StoreIC", lookup->name());
+ return;
+ }
- Handle<Code> code = ComputeStoreHandler(lookup, receiver, name, value);
+ bool use_ic = LookupForWrite(lookup, value, store_mode);
+ if (!use_ic) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
+ }
+ Handle<Code> code = use_ic ? ComputeHandler(lookup, value) : slow_stub();
- PatchCache(name, code);
- TRACE_IC("StoreIC", name);
+ PatchCache(lookup->name(), code);
+ TRACE_IC("StoreIC", lookup->name());
}
-Handle<Code> StoreIC::CompileStoreHandler(LookupResult* lookup,
- Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
- if (object->IsAccessCheckNeeded()) return slow_stub();
- DCHECK(cache_holder == kCacheOnReceiver || lookup->type() == CALLBACKS ||
- (object->IsJSGlobalProxy() && lookup->holder()->IsJSGlobalObject()));
- // This is currently guaranteed by checks in StoreIC::Store.
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
+ DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
- Handle<JSObject> holder(lookup->holder());
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ DCHECK(!receiver->IsAccessCheckNeeded());
+
+ switch (lookup->state()) {
+ case LookupIterator::TRANSITION: {
+ Handle<Map> transition = lookup->transition_map();
+ // Currently not handled by CompileStoreTransition.
+ if (!holder->HasFastProperties()) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+ break;
+ }
- if (lookup->IsTransition()) {
- // Explicitly pass in the receiver map since LookupForWrite may have
- // stored something else than the receiver in the holder.
- Handle<Map> transition(lookup->GetTransitionTarget());
- PropertyDetails details = lookup->GetPropertyDetails();
+ DCHECK(lookup->IsCacheableTransition());
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ return compiler.CompileStoreTransition(transition, lookup->name());
+ }
- if (details.type() != CALLBACKS && details.attributes() == NONE &&
- holder->HasFastProperties()) {
+ case LookupIterator::INTERCEPTOR: {
+ DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreTransition(transition, name);
+ return compiler.CompileStoreInterceptor(lookup->name());
}
- } else {
- switch (lookup->type()) {
- case FIELD: {
- bool use_stub = true;
- if (lookup->representation().IsHeapObject()) {
- // Only use a generic stub if no types need to be tracked.
- HeapType* field_type = lookup->GetFieldType();
- HeapType::Iterator<Map> it = field_type->Classes();
- use_stub = it.Done();
+
+ case LookupIterator::ACCESSOR: {
+ if (!holder->HasFastProperties()) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+ break;
+ }
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (v8::ToCData<Address>(info->setter()) == 0) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
+ break;
}
- if (use_stub) {
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- return stub.GetCode();
+ if (!ExecutableAccessorInfo::IsCompatibleReceiverType(
+ isolate(), info, receiver_type())) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
+ break;
+ }
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ return compiler.CompileStoreCallback(receiver, lookup->name(), info);
+ } else if (accessors->IsAccessorPair()) {
+ Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
+ isolate());
+ if (!setter->IsJSFunction()) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+ break;
}
+ Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+ CallOptimization call_optimization(function);
NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreField(lookup, name);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ return compiler.CompileStoreCallback(receiver, lookup->name(),
+ call_optimization);
+ }
+ return compiler.CompileStoreViaSetter(receiver, lookup->name(),
+ Handle<JSFunction>::cast(setter));
}
- case NORMAL:
- if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
- // The stub generated for the global object picks the value directly
- // from the property cell. So the property must be directly on the
- // global object.
- PrototypeIterator iter(isolate(), receiver);
- Handle<GlobalObject> global =
- receiver->IsJSGlobalProxy()
- ? Handle<GlobalObject>::cast(
- PrototypeIterator::GetCurrent(iter))
- : Handle<GlobalObject>::cast(receiver);
- Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
+ // TODO(dcarney): Handle correctly.
+ DCHECK(accessors->IsDeclaredAccessorInfo());
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "declared accessor info");
+ break;
+ }
+
+ case LookupIterator::DATA: {
+ if (lookup->is_dictionary_holder()) {
+ if (holder->IsGlobalObject()) {
+ Handle<PropertyCell> cell = lookup->GetPropertyCell();
Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(
- isolate(), union_type->IsConstant(), receiver->IsJSGlobalProxy());
- Handle<Code> code = stub.GetCodeCopyFromTemplate(global, cell);
+ StoreGlobalStub stub(isolate(), union_type->IsConstant(),
+ receiver->IsJSGlobalProxy());
+ Handle<Code> code = stub.GetCodeCopyFromTemplate(
+ Handle<GlobalObject>::cast(holder), cell);
// TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- HeapObject::UpdateMapCodeCache(receiver, name, code);
+ HeapObject::UpdateMapCodeCache(receiver, lookup->name(), code);
return code;
}
DCHECK(holder.is_identical_to(receiver));
return isolate()->builtins()->StoreIC_Normal();
- case CALLBACKS: {
- Handle<Object> callback(lookup->GetCallbackObject(), isolate());
- if (callback->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->setter()) == 0) break;
- if (!holder->HasFastProperties()) break;
- if (!ExecutableAccessorInfo::IsCompatibleReceiverType(
- isolate(), info, receiver_type())) {
- break;
- }
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(),
- holder);
- return compiler.CompileStoreCallback(receiver, name, info);
- } else if (callback->IsAccessorPair()) {
- Handle<Object> setter(
- Handle<AccessorPair>::cast(callback)->setter(), isolate());
- if (!setter->IsJSFunction()) break;
- if (holder->IsGlobalObject()) break;
- if (!holder->HasFastProperties()) break;
- Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
- CallOptimization call_optimization(function);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(),
- holder);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(receiver, holder)) {
- return compiler.CompileStoreCallback(receiver, name,
- call_optimization);
- }
- return compiler.CompileStoreViaSetter(
- receiver, name, Handle<JSFunction>::cast(setter));
- }
- // TODO(dcarney): Handle correctly.
- DCHECK(callback->IsDeclaredAccessorInfo());
- break;
}
- case INTERCEPTOR: {
- DCHECK(HasInterceptorSetter(*holder));
+
+ // -------------- Fields --------------
+ if (lookup->property_details().type() == FIELD) {
+ bool use_stub = true;
+ if (lookup->representation().IsHeapObject()) {
+ // Only use a generic stub if no types need to be tracked.
+ Handle<HeapType> field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ use_stub = it.Done();
+ }
+ if (use_stub) {
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ return stub.GetCode();
+ }
NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreInterceptor(name);
+ return compiler.CompileStoreField(lookup);
}
- case CONSTANT:
- break;
- case NONEXISTENT:
- case HANDLER:
- UNREACHABLE();
- break;
+
+ // -------------- Constant properties --------------
+ DCHECK(lookup->property_details().type() == CONSTANT);
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
+ break;
}
+
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ UNREACHABLE();
}
return slow_stub();
}
@@ -1610,7 +1550,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type");
return generic_stub();
}
@@ -1676,14 +1616,14 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
return generic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "max polymorph exceeded");
return generic_stub();
}
@@ -1694,7 +1634,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (store_mode == STANDARD_STORE) {
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
return generic_stub();
}
}
@@ -1712,8 +1652,8 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
}
if (external_arrays != 0 &&
external_arrays != target_receiver_maps.length()) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC",
- "unsupported combination of external and normal arrays");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "unsupported combination of external and normal arrays");
return generic_stub();
}
}
@@ -1724,8 +1664,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
- Handle<Map> map,
- KeyedAccessStoreMode store_mode) {
+ Handle<Map> map, KeyedAccessStoreMode store_mode) {
switch (store_mode) {
case STORE_TRANSITION_SMI_TO_OBJECT:
case STORE_TRANSITION_DOUBLE_TO_OBJECT:
@@ -1745,7 +1684,7 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
DCHECK(map->has_external_array_elements());
- // Fall through
+ // Fall through
case STORE_NO_TRANSITION_HANDLE_COW:
case STANDARD_STORE:
case STORE_AND_GROW_NO_TRANSITION:
@@ -1756,11 +1695,10 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
}
-bool IsOutOfBoundsAccess(Handle<JSObject> receiver,
- int index) {
+bool IsOutOfBoundsAccess(Handle<JSObject> receiver, int index) {
if (receiver->IsJSArray()) {
return JSArray::cast(*receiver)->length()->IsSmi() &&
- index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+ index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
}
return index >= receiver->elements()->length();
}
@@ -1775,7 +1713,7 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
// Don't consider this a growing store if the store would send the receiver to
// dictionary mode.
bool allow_growth = receiver->IsJSArray() && oob_access &&
- !receiver->WouldConvertToSlowElements(key);
+ !receiver->WouldConvertToSlowElements(key);
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
@@ -1850,10 +1788,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::SetObjectProperty(
- isolate(), object, key, value, strict_mode()),
+ isolate(), result, Runtime::SetObjectProperty(isolate(), object, key,
+ value, strict_mode()),
Object);
return result;
}
@@ -1867,14 +1803,15 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (key->IsInternalizedString()) {
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- store_handle,
- StoreIC::Store(object,
- Handle<String>::cast(key),
- value,
+ isolate(), store_handle,
+ StoreIC::Store(object, Handle<String>::cast(key), value,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
+ // TODO(jkummerow): Ideally we'd wrap this in "if (!is_target_set())",
+ // but doing so causes Hydrogen crashes. Needs investigation.
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "unhandled internalized string key");
+ TRACE_IC("StoreIC", key);
set_target(*stub);
return store_handle;
}
@@ -1888,7 +1825,10 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// expect to be able to trap element sets to objects with those maps in
// the runtime to enable optimization of element hole access.
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+ if (heap_object->map()->IsMapInArrayPrototypeChain()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype");
+ use_ic = false;
+ }
}
if (use_ic) {
@@ -1901,6 +1841,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
isolate()->heap()->sloppy_arguments_elements_map()) {
if (strict_mode() == SLOPPY) {
stub = sloppy_arguments_stub();
+ } else {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
}
} else if (key_is_smi_like &&
!(target().is_identical_to(sloppy_arguments_stub()))) {
@@ -1911,17 +1853,22 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
stub = StoreElementStub(receiver, store_mode);
+ } else {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
}
+ } else {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
}
+ } else {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver");
}
}
if (store_handle.is_null()) {
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- store_handle,
- Runtime::SetObjectProperty(
- isolate(), object, key, value, strict_mode()),
+ isolate(), store_handle,
+ Runtime::SetObjectProperty(isolate(), object, key, value,
+ strict_mode()),
Object);
}
@@ -1930,6 +1877,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (*stub == generic) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
+ if (*stub == *slow_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
+ }
DCHECK(!stub.is_null());
set_target(*stub);
TRACE_IC("StoreIC", key);
@@ -1938,30 +1888,14 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
-CallIC::State::State(ExtraICState extra_ic_state)
- : argc_(ArgcBits::decode(extra_ic_state)),
- call_type_(CallTypeBits::decode(extra_ic_state)) {
-}
-
-
-ExtraICState CallIC::State::GetExtraICState() const {
- ExtraICState extra_ic_state =
- ArgcBits::encode(argc_) |
- CallTypeBits::encode(call_type_);
- return extra_ic_state;
-}
-
-
-bool CallIC::DoCustomHandler(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot,
- const State& state) {
+bool CallIC::DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+ Handle<TypeFeedbackVector> vector,
+ Handle<Smi> slot, const CallICState& state) {
DCHECK(FLAG_use_ic && function->IsJSFunction());
// Are we the array function?
- Handle<JSFunction> array_function = Handle<JSFunction>(
- isolate()->native_context()->array_function());
+ Handle<JSFunction> array_function =
+ Handle<JSFunction>(isolate()->native_context()->array_function());
if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
// Alter the slot.
IC::State old_state = FeedbackToState(vector, slot);
@@ -1990,13 +1924,14 @@ bool CallIC::DoCustomHandler(Handle<Object> receiver,
void CallIC::PatchMegamorphic(Handle<Object> function,
- Handle<FixedArray> vector, Handle<Smi> slot) {
- State state(target()->extra_ic_state());
+ Handle<TypeFeedbackVector> vector,
+ Handle<Smi> slot) {
+ CallICState state(target()->extra_ic_state());
IC::State old_state = FeedbackToState(vector, slot);
// We are going generic.
vector->set(slot->value(),
- *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
+ *TypeFeedbackVector::MegamorphicSentinel(isolate()),
SKIP_WRITE_BARRIER);
CallICStub stub(isolate(), state);
@@ -2015,11 +1950,9 @@ void CallIC::PatchMegamorphic(Handle<Object> function,
}
-void CallIC::HandleMiss(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot) {
- State state(target()->extra_ic_state());
+void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function,
+ Handle<TypeFeedbackVector> vector, Handle<Smi> slot) {
+ CallICState state(target()->extra_ic_state());
IC::State old_state = FeedbackToState(vector, slot);
Handle<Object> name = isolate()->factory()->empty_string();
Object* feedback = vector->get(slot->value());
@@ -2030,7 +1963,7 @@ void CallIC::HandleMiss(Handle<Object> receiver,
if (feedback->IsJSFunction() || !function->IsJSFunction()) {
// We are going generic.
vector->set(slot->value(),
- *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
+ *TypeFeedbackVector::MegamorphicSentinel(isolate()),
SKIP_WRITE_BARRIER);
} else {
// The feedback is either uninitialized or an allocation site.
@@ -2039,7 +1972,7 @@ void CallIC::HandleMiss(Handle<Object> receiver,
// merely need to patch the target to match the feedback.
// TODO(mvstanton): the better approach is to dispense with patching
// altogether, which is in progress.
- DCHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()) ||
+ DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()) ||
feedback->IsAllocationSite());
// Do we want to install a custom handler?
@@ -2077,7 +2010,7 @@ RUNTIME_FUNCTION(CallIC_Miss) {
CallIC ic(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> function = args.at<Object>(1);
- Handle<FixedArray> vector = args.at<FixedArray>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
Handle<Smi> slot = args.at<Smi>(3);
ic.HandleMiss(receiver, function, vector, slot);
return *function;
@@ -2091,7 +2024,7 @@ RUNTIME_FUNCTION(CallIC_Customization_Miss) {
// A miss on a custom call ic always results in going megamorphic.
CallIC ic(isolate);
Handle<Object> function = args.at<Object>(1);
- Handle<FixedArray> vector = args.at<FixedArray>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
Handle<Smi> slot = args.at<Smi>(3);
ic.PatchMegamorphic(function, vector, slot);
return *function;
@@ -2105,7 +2038,7 @@ RUNTIME_FUNCTION(LoadIC_Miss) {
DCHECK(args.length() == 2);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
- Handle<String> key = args.at<String>(1);
+ Handle<Name> key = args.at<Name>(1);
ic.UpdateState(receiver, key);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
@@ -2153,9 +2086,7 @@ RUNTIME_FUNCTION(StoreIC_Miss) {
ic.UpdateState(receiver, key);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
return *result;
}
@@ -2170,9 +2101,7 @@ RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
ic.UpdateState(receiver, key);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
return *result;
}
@@ -2212,9 +2141,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
ic.UpdateState(receiver, key);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
return *result;
}
@@ -2229,9 +2156,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
ic.UpdateState(receiver, key);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
return *result;
}
@@ -2247,8 +2172,7 @@ RUNTIME_FUNCTION(StoreIC_Slow) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(
- isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
return *result;
}
@@ -2264,8 +2188,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(
- isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
return *result;
}
@@ -2287,429 +2210,15 @@ RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(
- isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
return *result;
}
-BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state)
- : isolate_(isolate) {
- op_ = static_cast<Token::Value>(
- FIRST_TOKEN + OpField::decode(extra_ic_state));
- mode_ = OverwriteModeField::decode(extra_ic_state);
- fixed_right_arg_ = Maybe<int>(
- HasFixedRightArgField::decode(extra_ic_state),
- 1 << FixedRightArgValueField::decode(extra_ic_state));
- left_kind_ = LeftKindField::decode(extra_ic_state);
- if (fixed_right_arg_.has_value) {
- right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
- } else {
- right_kind_ = RightKindField::decode(extra_ic_state);
- }
- result_kind_ = ResultKindField::decode(extra_ic_state);
- DCHECK_LE(FIRST_TOKEN, op_);
- DCHECK_LE(op_, LAST_TOKEN);
-}
-
-
-ExtraICState BinaryOpIC::State::GetExtraICState() const {
- ExtraICState extra_ic_state =
- OpField::encode(op_ - FIRST_TOKEN) |
- OverwriteModeField::encode(mode_) |
- LeftKindField::encode(left_kind_) |
- ResultKindField::encode(result_kind_) |
- HasFixedRightArgField::encode(fixed_right_arg_.has_value);
- if (fixed_right_arg_.has_value) {
- extra_ic_state = FixedRightArgValueField::update(
- extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
- } else {
- extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
- }
- return extra_ic_state;
-}
-
-
-// static
-void BinaryOpIC::State::GenerateAheadOfTime(
- Isolate* isolate, void (*Generate)(Isolate*, const State&)) {
- // TODO(olivf) We should investigate why adding stubs to the snapshot is so
- // expensive at runtime. When solved we should be able to add most binops to
- // the snapshot instead of hand-picking them.
- // Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
- do { \
- State state(isolate, op, mode); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = false; \
- state.right_kind_ = right_kind; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
-#undef GENERATE
-#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
- do { \
- State state(isolate, op, mode); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = true; \
- state.fixed_right_arg_.value = fixed_right_arg_value; \
- state.right_kind_ = SMI; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
-#undef GENERATE
-}
-
-
-Type* BinaryOpIC::State::GetResultType(Zone* zone) const {
- Kind result_kind = result_kind_;
- if (HasSideEffects()) {
- result_kind = NONE;
- } else if (result_kind == GENERIC && op_ == Token::ADD) {
- return Type::Union(Type::Number(zone), Type::String(zone), zone);
- } else if (result_kind == NUMBER && op_ == Token::SHR) {
- return Type::Unsigned32(zone);
- }
- DCHECK_NE(GENERIC, result_kind);
- return KindToType(result_kind, zone);
-}
-
-
-OStream& operator<<(OStream& os, const BinaryOpIC::State& s) {
- os << "(" << Token::Name(s.op_);
- if (s.mode_ == OVERWRITE_LEFT)
- os << "_ReuseLeft";
- else if (s.mode_ == OVERWRITE_RIGHT)
- os << "_ReuseRight";
- if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
- os << ":" << BinaryOpIC::State::KindToString(s.left_kind_) << "*";
- if (s.fixed_right_arg_.has_value) {
- os << s.fixed_right_arg_.value;
- } else {
- os << BinaryOpIC::State::KindToString(s.right_kind_);
- }
- return os << "->" << BinaryOpIC::State::KindToString(s.result_kind_) << ")";
-}
-
-
-void BinaryOpIC::State::Update(Handle<Object> left,
- Handle<Object> right,
- Handle<Object> result) {
- ExtraICState old_extra_ic_state = GetExtraICState();
-
- left_kind_ = UpdateKind(left, left_kind_);
- right_kind_ = UpdateKind(right, right_kind_);
-
- int32_t fixed_right_arg_value = 0;
- bool has_fixed_right_arg =
- op_ == Token::MOD &&
- right->ToInt32(&fixed_right_arg_value) &&
- fixed_right_arg_value > 0 &&
- IsPowerOf2(fixed_right_arg_value) &&
- FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
- (left_kind_ == SMI || left_kind_ == INT32) &&
- (result_kind_ == NONE || !fixed_right_arg_.has_value);
- fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg,
- fixed_right_arg_value);
-
- result_kind_ = UpdateKind(result, result_kind_);
-
- if (!Token::IsTruncatingBinaryOp(op_)) {
- Kind input_kind = Max(left_kind_, right_kind_);
- if (result_kind_ < input_kind && input_kind <= NUMBER) {
- result_kind_ = input_kind;
- }
- }
-
- // We don't want to distinguish INT32 and NUMBER for string add (because
- // NumberToString can't make use of this anyway).
- if (left_kind_ == STRING && right_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- right_kind_ = NUMBER;
- } else if (right_kind_ == STRING && left_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- left_kind_ = NUMBER;
- }
-
- // Reset overwrite mode unless we can actually make use of it, or may be able
- // to make use of it at some point in the future.
- if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
- (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
- result_kind_ > NUMBER) {
- mode_ = NO_OVERWRITE;
- }
-
- if (old_extra_ic_state == GetExtraICState()) {
- // Tagged operations can lead to non-truncating HChanges
- if (left->IsUndefined() || left->IsBoolean()) {
- left_kind_ = GENERIC;
- } else {
- DCHECK(right->IsUndefined() || right->IsBoolean());
- right_kind_ = GENERIC;
- }
- }
-}
-
-
-BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
- Kind kind) const {
- Kind new_kind = GENERIC;
- bool is_truncating = Token::IsTruncatingBinaryOp(op());
- if (object->IsBoolean() && is_truncating) {
- // Booleans will be automatically truncated by HChange.
- new_kind = INT32;
- } else if (object->IsUndefined()) {
- // Undefined will be automatically truncated by HChange.
- new_kind = is_truncating ? INT32 : NUMBER;
- } else if (object->IsSmi()) {
- new_kind = SMI;
- } else if (object->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(object)->value();
- new_kind = IsInt32Double(value) ? INT32 : NUMBER;
- } else if (object->IsString() && op() == Token::ADD) {
- new_kind = STRING;
- }
- if (new_kind == INT32 && SmiValuesAre32Bits()) {
- new_kind = NUMBER;
- }
- if (kind != NONE &&
- ((new_kind <= NUMBER && kind > NUMBER) ||
- (new_kind > NUMBER && kind <= NUMBER))) {
- new_kind = GENERIC;
- }
- return Max(kind, new_kind);
-}
-
-
-// static
-const char* BinaryOpIC::State::KindToString(Kind kind) {
- switch (kind) {
- case NONE: return "None";
- case SMI: return "Smi";
- case INT32: return "Int32";
- case NUMBER: return "Number";
- case STRING: return "String";
- case GENERIC: return "Generic";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// static
-Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
- switch (kind) {
- case NONE: return Type::None(zone);
- case SMI: return Type::SignedSmall(zone);
- case INT32: return Type::Signed32(zone);
- case NUMBER: return Type::Number(zone);
- case STRING: return Type::String(zone);
- case GENERIC: return Type::Any(zone);
- }
- UNREACHABLE();
- return NULL;
-}
-
-
MaybeHandle<Object> BinaryOpIC::Transition(
- Handle<AllocationSite> allocation_site,
- Handle<Object> left,
+ Handle<AllocationSite> allocation_site, Handle<Object> left,
Handle<Object> right) {
- State state(isolate(), target()->extra_ic_state());
+ BinaryOpICState state(isolate(), target()->extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
@@ -2717,16 +2226,14 @@ MaybeHandle<Object> BinaryOpIC::Transition(
Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Execution::Call(isolate(), function, left, 1, &right),
+ isolate(), result, Execution::Call(isolate(), function, left, 1, &right),
Object);
// Execution::Call can execute arbitrary JavaScript, hence potentially
// update the state of this very IC, so we must update the stored state.
UpdateTarget();
// Compute the new state.
- State old_state(isolate(), target()->extra_ic_state());
+ BinaryOpICState old_state(isolate(), target()->extra_ic_state());
state.Update(left, right, result);
// Check if we have a string operation here.
@@ -2784,8 +2291,7 @@ RUNTIME_FUNCTION(BinaryOpIC_Miss) {
BinaryOpIC ic(isolate);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
+ isolate, result,
ic.Transition(Handle<AllocationSite>::null(), left, right));
return *result;
}
@@ -2795,24 +2301,23 @@ RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<AllocationSite> allocation_site = args.at<AllocationSite>(
- BinaryOpWithAllocationSiteStub::kAllocationSite);
- Handle<Object> left = args.at<Object>(
- BinaryOpWithAllocationSiteStub::kLeft);
- Handle<Object> right = args.at<Object>(
- BinaryOpWithAllocationSiteStub::kRight);
+ Handle<AllocationSite> allocation_site =
+ args.at<AllocationSite>(BinaryOpWithAllocationSiteStub::kAllocationSite);
+ Handle<Object> left = args.at<Object>(BinaryOpWithAllocationSiteStub::kLeft);
+ Handle<Object> right =
+ args.at<Object>(BinaryOpWithAllocationSiteStub::kRight);
BinaryOpIC ic(isolate);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Transition(allocation_site, left, right));
+ isolate, result, ic.Transition(allocation_site, left, right));
return *result;
}
Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
+ CompareICState::UNINITIALIZED,
+ CompareICState::UNINITIALIZED);
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code));
return code;
@@ -2820,178 +2325,25 @@ Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
+ CompareICState::UNINITIALIZED,
+ CompareICState::UNINITIALIZED);
return stub.GetCode();
}
-const char* CompareIC::GetStateName(State state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case SMI: return "SMI";
- case NUMBER: return "NUMBER";
- case INTERNALIZED_STRING: return "INTERNALIZED_STRING";
- case STRING: return "STRING";
- case UNIQUE_NAME: return "UNIQUE_NAME";
- case OBJECT: return "OBJECT";
- case KNOWN_OBJECT: return "KNOWN_OBJECT";
- case GENERIC: return "GENERIC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-Type* CompareIC::StateToType(
- Zone* zone,
- CompareIC::State state,
- Handle<Map> map) {
- switch (state) {
- case CompareIC::UNINITIALIZED: return Type::None(zone);
- case CompareIC::SMI: return Type::SignedSmall(zone);
- case CompareIC::NUMBER: return Type::Number(zone);
- case CompareIC::STRING: return Type::String(zone);
- case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
- case CompareIC::UNIQUE_NAME: return Type::UniqueName(zone);
- case CompareIC::OBJECT: return Type::Receiver(zone);
- case CompareIC::KNOWN_OBJECT:
- return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
- case CompareIC::GENERIC: return Type::Any(zone);
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-void CompareIC::StubInfoToType(uint32_t stub_key, Type** left_type,
- Type** right_type, Type** overall_type,
- Handle<Map> map, Zone* zone) {
- State left_state, right_state, handler_state;
- ICCompareStub::DecodeKey(stub_key, &left_state, &right_state, &handler_state,
- NULL);
- *left_type = StateToType(zone, left_state);
- *right_type = StateToType(zone, right_state);
- *overall_type = StateToType(zone, handler_state, map);
-}
-
-
-CompareIC::State CompareIC::NewInputState(State old_state,
- Handle<Object> value) {
- switch (old_state) {
- case UNINITIALIZED:
- if (value->IsSmi()) return SMI;
- if (value->IsHeapNumber()) return NUMBER;
- if (value->IsInternalizedString()) return INTERNALIZED_STRING;
- if (value->IsString()) return STRING;
- if (value->IsSymbol()) return UNIQUE_NAME;
- if (value->IsJSObject()) return OBJECT;
- break;
- case SMI:
- if (value->IsSmi()) return SMI;
- if (value->IsHeapNumber()) return NUMBER;
- break;
- case NUMBER:
- if (value->IsNumber()) return NUMBER;
- break;
- case INTERNALIZED_STRING:
- if (value->IsInternalizedString()) return INTERNALIZED_STRING;
- if (value->IsString()) return STRING;
- if (value->IsSymbol()) return UNIQUE_NAME;
- break;
- case STRING:
- if (value->IsString()) return STRING;
- break;
- case UNIQUE_NAME:
- if (value->IsUniqueName()) return UNIQUE_NAME;
- break;
- case OBJECT:
- if (value->IsJSObject()) return OBJECT;
- break;
- case GENERIC:
- break;
- case KNOWN_OBJECT:
- UNREACHABLE();
- break;
- }
- return GENERIC;
-}
-
-
-CompareIC::State CompareIC::TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y) {
- switch (old_state) {
- case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMI;
- if (x->IsNumber() && y->IsNumber()) return NUMBER;
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- // Ordered comparisons treat undefined as NaN, so the
- // NUMBER stub will do the right thing.
- if ((x->IsNumber() && y->IsUndefined()) ||
- (y->IsNumber() && x->IsUndefined())) {
- return NUMBER;
- }
- }
- if (x->IsInternalizedString() && y->IsInternalizedString()) {
- // We compare internalized strings as plain ones if we need to determine
- // the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
- }
- if (x->IsString() && y->IsString()) return STRING;
- if (!Token::IsEqualityOp(op_)) return GENERIC;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
- if (x->IsJSObject() && y->IsJSObject()) {
- if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map()) {
- return KNOWN_OBJECT;
- } else {
- return OBJECT;
- }
- }
- return GENERIC;
- case SMI:
- return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
- case INTERNALIZED_STRING:
- DCHECK(Token::IsEqualityOp(op_));
- if (x->IsString() && y->IsString()) return STRING;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
- return GENERIC;
- case NUMBER:
- // If the failure was due to one side changing from smi to heap number,
- // then keep the state (if other changed at the same time, we will get
- // a second miss and then go to generic).
- if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
- if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
- return GENERIC;
- case KNOWN_OBJECT:
- DCHECK(Token::IsEqualityOp(op_));
- if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
- return GENERIC;
- case STRING:
- case UNIQUE_NAME:
- case OBJECT:
- case GENERIC:
- return GENERIC;
- }
- UNREACHABLE();
- return GENERIC; // Make the compiler happy.
-}
-
-
Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope(isolate());
- State previous_left, previous_right, previous_state;
- ICCompareStub::DecodeKey(target()->stub_key(), &previous_left,
- &previous_right, &previous_state, NULL);
- State new_left = NewInputState(previous_left, x);
- State new_right = NewInputState(previous_right, y);
- State state = TargetState(previous_state, previous_left, previous_right,
- HasInlinedSmiCode(address()), x, y);
- ICCompareStub stub(isolate(), op_, new_left, new_right, state);
- if (state == KNOWN_OBJECT) {
+ CompareICStub old_stub(target()->stub_key(), isolate());
+ CompareICState::State new_left =
+ CompareICState::NewInputState(old_stub.left(), x);
+ CompareICState::State new_right =
+ CompareICState::NewInputState(old_stub.right(), y);
+ CompareICState::State state = CompareICState::TargetState(
+ old_stub.state(), old_stub.left(), old_stub.right(), op_,
+ HasInlinedSmiCode(address()), x, y);
+ CompareICStub stub(isolate(), op_, new_left, new_right, state);
+ if (state == CompareICState::KNOWN_OBJECT) {
stub.set_known_map(
Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
}
@@ -3002,18 +2354,17 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
PrintF("[CompareIC in ");
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
- GetStateName(previous_left),
- GetStateName(previous_right),
- GetStateName(previous_state),
- GetStateName(new_left),
- GetStateName(new_right),
- GetStateName(state),
- Token::Name(op_),
+ CompareICState::GetStateName(old_stub.left()),
+ CompareICState::GetStateName(old_stub.right()),
+ CompareICState::GetStateName(old_stub.state()),
+ CompareICState::GetStateName(new_left),
+ CompareICState::GetStateName(new_right),
+ CompareICState::GetStateName(state), Token::Name(op_),
static_cast<void*>(*stub.GetCode()));
}
// Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
+ if (old_stub.state() == CompareICState::UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
}
@@ -3021,7 +2372,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
}
-// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
+// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(CompareIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
@@ -3031,14 +2382,12 @@ RUNTIME_FUNCTION(CompareIC_Miss) {
}
-void CompareNilIC::Clear(Address address,
- Code* target,
+void CompareNilIC::Clear(Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
ExtraICState state = target->extra_ic_state();
- CompareNilICStub stub(target->GetIsolate(),
- state,
+ CompareNilICStub stub(target->GetIsolate(), state,
HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
@@ -3049,8 +2398,7 @@ void CompareNilIC::Clear(Address address,
}
-Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate,
- NilValue nil,
+Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
Handle<Object> object) {
if (object->IsNull() || object->IsUndefined()) {
return handle(Smi::FromInt(true), isolate);
@@ -3070,14 +2418,14 @@ Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
stub.UpdateStatus(object);
- NilValue nil = stub.GetNilValue();
+ NilValue nil = stub.nil_value();
// Find or create the specialized stub to support the new set of types.
Handle<Code> code;
if (stub.IsMonomorphic()) {
Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
- ? FirstTargetMap()
- : HeapObject::cast(*object)->map());
+ ? FirstTargetMap()
+ : HeapObject::cast(*object)->map());
code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub);
} else {
code = stub.GetCode();
@@ -3163,17 +2511,179 @@ RUNTIME_FUNCTION(ToBooleanIC_Miss) {
}
+RUNTIME_FUNCTION(StoreCallbackProperty) {
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<JSObject> holder = args.at<JSObject>(1);
+ Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2);
+ Handle<Name> name = args.at<Name>(3);
+ Handle<Object> value = args.at<Object>(4);
+ HandleScope scope(isolate);
+
+ DCHECK(callback->IsCompatibleReceiver(*receiver));
+
+ Address setter_address = v8::ToCData<Address>(callback->setter());
+ v8::AccessorNameSetterCallback fun =
+ FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
+ DCHECK(fun != NULL);
+
+ LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
+ PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
+ *holder);
+ custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return *value;
+}
+
+
+/**
+ * Attempts to load a property with an interceptor (which must be present),
+ * but doesn't search the prototype chain.
+ *
+ * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
+ * provide any value for the given name.
+ */
+RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
+ DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
+ NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
+
+ // TODO(rossberg): Support symbols in the API.
+ if (name_handle->IsSymbol())
+ return isolate->heap()->no_interceptor_result_sentinel();
+ Handle<String> name = Handle<String>::cast(name_handle);
+
+ Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
+ DCHECK(getter != NULL);
+
+ Handle<JSObject> receiver =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+ PropertyCallbackArguments callback_args(isolate, interceptor_info->data(),
+ *receiver, *holder);
+ {
+ // Use the interceptor getter.
+ HandleScope scope(isolate);
+ v8::Handle<v8::Value> r =
+ callback_args.Call(getter, v8::Utils::ToLocal(name));
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (!r.IsEmpty()) {
+ Handle<Object> result = v8::Utils::OpenHandle(*r);
+ result->VerifyApiCallResultType();
+ return *v8::Utils::OpenHandle(*r);
+ }
+ }
+
+ return isolate->heap()->no_interceptor_result_sentinel();
+}
+
+
+static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
+ // If the load is non-contextual, just return the undefined result.
+ // Note that both keyed and non-keyed loads may end up here.
+ HandleScope scope(isolate);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ if (ic.contextual_mode() != CONTEXTUAL) {
+ return isolate->heap()->undefined_value();
+ }
+
+ // Throw a reference error.
+ Handle<Name> name_handle(name);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError("not_defined", HandleVector(&name_handle, 1)));
+}
+
+
+/**
+ * Loads a property with an interceptor performing post interceptor
+ * lookup if interceptor failed.
+ */
+RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ Handle<Name> name =
+ args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
+ Handle<JSObject> receiver =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+
+ Handle<Object> result;
+ LookupIterator it(receiver, name, holder);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::GetProperty(&it));
+
+ if (it.IsFound()) return *result;
+
+ return ThrowReferenceError(isolate, Name::cast(args[0]));
+}
+
+
+RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<Object> value = args.at<Object>(2);
+#ifdef DEBUG
+ PrototypeIterator iter(isolate, receiver,
+ PrototypeIterator::START_AT_RECEIVER);
+ bool found = false;
+ while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+ if (current->IsJSObject() &&
+ Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
+ found = true;
+ break;
+ }
+ }
+ DCHECK(found);
+#endif
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetProperty(receiver, name, value, ic.strict_mode()));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(LoadElementWithInterceptor) {
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ DCHECK(args.smi_at(1) >= 0);
+ uint32_t index = args.smi_at(1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::GetElementWithInterceptor(receiver, receiver, index));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(VectorLoadIC_MissFromStubFailure) {
+ // TODO(mvstanton): To be enabled when ICs can accept a vector and slot
+ return NULL;
+}
+
+
+RUNTIME_FUNCTION(VectorKeyedLoadIC_MissFromStubFailure) {
+ // TODO(mvstanton): To be enabled when ICs can accept a vector and slot
+ return NULL;
+}
+
+
static const Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
- IC_UTIL_LIST(ADDR)
- NULL
+ IC_UTIL_LIST(ADDR) NULL
#undef ADDR
};
-Address IC::AddressFromUtilityId(IC::UtilityId id) {
- return IC_utilities[id];
+Address IC::AddressFromUtilityId(IC::UtilityId id) { return IC_utilities[id]; }
}
-
-
-} } // namespace v8::internal
+} // namespace v8::internal
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic/ic.h
index eb844cf747..d86d2b7b64 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -5,15 +5,13 @@
#ifndef V8_IC_H_
#define V8_IC_H_
+#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-const int kMaxKeyedPolymorphism = 4;
-
-
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
@@ -44,9 +42,9 @@ class IC {
public:
// The ids for utility called from the generated code.
enum UtilityId {
- #define CONST_NAME(name) k##name,
+#define CONST_NAME(name) k##name,
IC_UTIL_LIST(CONST_NAME)
- #undef CONST_NAME
+#undef CONST_NAME
kUtilityCount
};
@@ -58,10 +56,7 @@ class IC {
// The IC code is either invoked with no extra frames on the stack
// or with a single extra frame for supporting calls.
- enum FrameDepth {
- NO_EXTRA_FRAME = 0,
- EXTRA_CALL_FRAME = 1
- };
+ enum FrameDepth { NO_EXTRA_FRAME = 0, EXTRA_CALL_FRAME = 1 };
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
@@ -89,8 +84,7 @@ class IC {
static void InvalidateMaps(Code* stub);
// Clear the inline cache to initial state.
- static void Clear(Isolate* isolate,
- Address address,
+ static void Clear(Isolate* isolate, Address address,
ConstantPoolArray* constant_pool);
#ifdef DEBUG
@@ -102,9 +96,7 @@ class IC {
return target()->is_store_stub() || target()->is_keyed_store_stub();
}
- bool IsCallStub() const {
- return target()->is_call_stub();
- }
+ bool IsCallStub() const { return target()->is_call_stub(); }
#endif
template <class TypeClass>
@@ -151,14 +143,7 @@ class IC {
Code* GetOriginalCode() const;
// Set the call-site target.
- void set_target(Code* code) {
-#ifdef VERIFY_HEAP
- code->VerifyEmbeddedObjectsDependency();
-#endif
- SetTargetAtAddress(address(), code, constant_pool());
- target_set_ = true;
- }
-
+ inline void set_target(Code* code);
bool is_target_set() { return target_set_; }
char TransitionMarkFromState(IC::State state);
@@ -166,16 +151,14 @@ class IC {
void TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state);
- MaybeHandle<Object> TypeError(const char* type,
- Handle<Object> object,
+ MaybeHandle<Object> TypeError(const char* type, Handle<Object> object,
Handle<Object> key);
MaybeHandle<Object> ReferenceError(const char* type, Handle<Name> name);
// Access the target code for the given IC address.
static inline Code* GetTargetAtAddress(Address address,
ConstantPoolArray* constant_pool);
- static inline void SetTargetAtAddress(Address address,
- Code* target,
+ static inline void SetTargetAtAddress(Address address, Code* target,
ConstantPoolArray* constant_pool);
static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
State old_state, State new_state,
@@ -183,29 +166,14 @@ class IC {
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
- Handle<Code> ComputeHandler(LookupIterator* lookup, Handle<Object> object,
- Handle<Name> name,
+ Handle<Code> ComputeHandler(LookupIterator* lookup,
Handle<Object> value = Handle<Code>::null());
virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> object,
- Handle<Name> name, Handle<Object> value,
+ Handle<Object> value,
CacheHolderFlag cache_holder) {
UNREACHABLE();
return Handle<Code>::null();
}
- // Temporary copy of the above, but using a LookupResult.
- // TODO(jkummerow): Migrate callers to LookupIterator and delete these.
- Handle<Code> ComputeStoreHandler(LookupResult* lookup, Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value = Handle<Code>::null());
- virtual Handle<Code> CompileStoreHandler(LookupResult* lookup,
- Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
- UNREACHABLE();
- return Handle<Code>::null();
- }
void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name);
bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code);
@@ -230,11 +198,12 @@ class IC {
Handle<String> name);
ExtraICState extra_ic_state() const { return extra_ic_state_; }
- void set_extra_ic_state(ExtraICState state) {
- extra_ic_state_ = state;
- }
+ void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; }
Handle<HeapType> receiver_type() { return receiver_type_; }
+ void update_receiver_type(Handle<Object> receiver) {
+ receiver_type_ = CurrentTypeOf(receiver, isolate_);
+ }
void TargetMaps(MapHandleList* list) {
FindTargetMaps();
@@ -246,7 +215,7 @@ class IC {
void TargetTypes(TypeHandleList* list) {
FindTargetMaps();
for (int i = 0; i < target_maps_.length(); i++) {
- list->Add(IC::MapToType<HeapType>(target_maps_.at(i), isolate_));
+ list->Add(MapToType<HeapType>(target_maps_.at(i), isolate_));
}
}
@@ -256,14 +225,10 @@ class IC {
}
protected:
- void UpdateTarget() {
- target_ = handle(raw_target(), isolate_);
- }
+ inline void UpdateTarget();
private:
- Code* raw_target() const {
- return GetTargetAtAddress(address(), constant_pool());
- }
+ inline Code* raw_target() const;
inline ConstantPoolArray* constant_pool() const;
inline ConstantPoolArray* raw_constant_pool() const;
@@ -314,133 +279,57 @@ class IC {
class IC_Utility {
public:
explicit IC_Utility(IC::UtilityId id)
- : address_(IC::AddressFromUtilityId(id)), id_(id) {}
+ : address_(IC::AddressFromUtilityId(id)), id_(id) {}
Address address() const { return address_; }
IC::UtilityId id() const { return id_; }
+
private:
Address address_;
IC::UtilityId id_;
};
-class CallIC: public IC {
+class CallIC : public IC {
public:
- enum CallType { METHOD, FUNCTION };
-
- class State V8_FINAL BASE_EMBEDDED {
- public:
- explicit State(ExtraICState extra_ic_state);
+ explicit CallIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
- State(int argc, CallType call_type)
- : argc_(argc), call_type_(call_type) {
- }
-
- ExtraICState GetExtraICState() const;
-
- static void GenerateAheadOfTime(
- Isolate*, void (*Generate)(Isolate*, const State&));
-
- int arg_count() const { return argc_; }
- CallType call_type() const { return call_type_; }
-
- bool CallAsMethod() const { return call_type_ == METHOD; }
-
- private:
- class ArgcBits: public BitField<int, 0, Code::kArgumentsBits> {};
- class CallTypeBits: public BitField<CallType, Code::kArgumentsBits, 1> {};
-
- const int argc_;
- const CallType call_type_;
- };
+ void PatchMegamorphic(Handle<Object> function,
+ Handle<TypeFeedbackVector> vector, Handle<Smi> slot);
- explicit CallIC(Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate) {
- }
-
- void PatchMegamorphic(Handle<Object> function, Handle<FixedArray> vector,
- Handle<Smi> slot);
-
- void HandleMiss(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot);
+ void HandleMiss(Handle<Object> receiver, Handle<Object> function,
+ Handle<TypeFeedbackVector> vector, Handle<Smi> slot);
// Returns true if a custom handler was installed.
- bool DoCustomHandler(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot,
- const State& state);
+ bool DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+ Handle<TypeFeedbackVector> vector, Handle<Smi> slot,
+ const CallICState& state);
// Code generator routines.
- static Handle<Code> initialize_stub(Isolate* isolate,
- int argc,
- CallType call_type);
+ static Handle<Code> initialize_stub(Isolate* isolate, int argc,
+ CallICState::CallType call_type);
static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
private:
- inline IC::State FeedbackToState(Handle<FixedArray> vector,
+ inline IC::State FeedbackToState(Handle<TypeFeedbackVector> vector,
Handle<Smi> slot) const;
};
-OStream& operator<<(OStream& os, const CallIC::State& s);
-
-
-class LoadIC: public IC {
+class LoadIC : public IC {
public:
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kParameterCount
- };
- static const Register ReceiverRegister();
- static const Register NameRegister();
-
- // With flag vector-ics, there is an additional argument. And for calls from
- // crankshaft, yet another.
- static const Register SlotRegister();
- static const Register VectorRegister();
-
- class State V8_FINAL BASE_EMBEDDED {
- public:
- explicit State(ExtraICState extra_ic_state)
- : state_(extra_ic_state) {}
-
- explicit State(ContextualMode mode)
- : state_(ContextualModeBits::encode(mode)) {}
-
- ExtraICState GetExtraICState() const { return state_; }
-
- ContextualMode contextual_mode() const {
- return ContextualModeBits::decode(state_);
- }
-
- private:
- class ContextualModeBits: public BitField<ContextualMode, 0, 1> {};
- STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
-
- const ExtraICState state_;
- };
-
static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
- return State(contextual_mode).GetExtraICState();
- }
-
- static ContextualMode GetContextualMode(ExtraICState state) {
- return State(state).contextual_mode();
+ return LoadICState(contextual_mode).GetExtraICState();
}
ContextualMode contextual_mode() const {
- return GetContextualMode(extra_ic_state());
+ return LoadICState::GetContextualMode(extra_ic_state());
}
- explicit LoadIC(FrameDepth depth, Isolate* isolate)
- : IC(depth, isolate) {
+ explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
DCHECK(IsLoadStub());
}
@@ -461,7 +350,6 @@ class LoadIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
@@ -472,13 +360,7 @@ class LoadIC: public IC {
Handle<Name> name);
protected:
- void set_target(Code* code) {
- // The contextual mode must be preserved across IC patching.
- DCHECK(GetContextualMode(code->extra_ic_state()) ==
- GetContextualMode(target()->extra_ic_state()));
-
- IC::set_target(code);
- }
+ inline void set_target(Code* code);
Handle<Code> slow_stub() const {
if (kind() == Code::LOAD_IC) {
@@ -489,16 +371,13 @@ class LoadIC: public IC {
}
}
- virtual Handle<Code> megamorphic_stub();
+ virtual Handle<Code> megamorphic_stub() OVERRIDE;
// Update the inline cache and the global stub cache based on the
// lookup result.
- void UpdateCaches(LookupIterator* lookup, Handle<Object> object,
- Handle<Name> name);
+ void UpdateCaches(LookupIterator* lookup);
virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> object,
- Handle<Name> name,
Handle<Object> unused,
CacheHolderFlag cache_holder);
@@ -509,16 +388,14 @@ class LoadIC: public IC {
Handle<Code> SimpleFieldLoad(FieldIndex index);
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
+ static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
friend class IC;
};
-class KeyedLoadIC: public LoadIC {
+class KeyedLoadIC : public LoadIC {
public:
explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
: LoadIC(depth, isolate) {
@@ -537,8 +414,6 @@ class KeyedLoadIC: public LoadIC {
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
- static void GenerateIndexedInterceptor(MacroAssembler* masm);
- static void GenerateSloppyArguments(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -558,28 +433,20 @@ class KeyedLoadIC: public LoadIC {
private:
Handle<Code> generic_stub() const { return generic_stub(isolate()); }
- Handle<Code> indexed_interceptor_stub() {
- return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
- }
- Handle<Code> sloppy_arguments_stub() {
- return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
- }
Handle<Code> string_stub() {
return isolate()->builtins()->KeyedLoadIC_String();
}
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
+ static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
friend class IC;
};
-class StoreIC: public IC {
+class StoreIC : public IC {
public:
- class StrictModeState: public BitField<StrictMode, 1, 1> {};
+ class StrictModeState : public BitField<StrictMode, 1, 1> {};
static ExtraICState ComputeExtraICState(StrictMode flag) {
return StrictModeState::encode(flag);
}
@@ -589,21 +456,9 @@ class StoreIC: public IC {
// For convenience, a statically declared encoding of strict mode extra
// IC state.
- static const ExtraICState kStrictModeState =
- 1 << StrictModeState::kShift;
-
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kParameterCount
- };
- static const Register ReceiverRegister();
- static const Register NameRegister();
- static const Register ValueRegister();
+ static const ExtraICState kStrictModeState = 1 << StrictModeState::kShift;
- StoreIC(FrameDepth depth, Isolate* isolate)
- : IC(depth, isolate) {
+ StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
DCHECK(IsStoreStub());
}
@@ -623,25 +478,23 @@ class StoreIC: public IC {
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictMode strict_mode);
- static Handle<Code> initialize_stub(Isolate* isolate,
- StrictMode strict_mode);
+ static Handle<Code> initialize_stub(Isolate* isolate, StrictMode strict_mode);
MUST_USE_RESULT MaybeHandle<Object> Store(
- Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
+ Handle<Object> object, Handle<Name> name, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode =
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+ bool LookupForWrite(LookupIterator* it, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode);
+
protected:
- virtual Handle<Code> megamorphic_stub();
+ virtual Handle<Code> megamorphic_stub() OVERRIDE;
// Stub accessors.
- virtual Handle<Code> generic_stub() const;
+ Handle<Code> generic_stub() const;
- virtual Handle<Code> slow_stub() const {
- return isolate()->builtins()->StoreIC_Slow();
- }
+ Handle<Code> slow_stub() const;
virtual Handle<Code> pre_monomorphic_stub() const {
return pre_monomorphic_stub(isolate(), strict_mode());
@@ -652,56 +505,39 @@ class StoreIC: public IC {
// Update the inline cache and the global stub cache based on the
// lookup result.
- void UpdateCaches(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<Name> name,
- Handle<Object> value);
- virtual Handle<Code> CompileStoreHandler(LookupResult* lookup,
- Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
- CacheHolderFlag cache_holder);
+ void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode);
+ virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder);
private:
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- DCHECK(GetStrictMode(code->extra_ic_state()) ==
- GetStrictMode(target()->extra_ic_state()));
- IC::set_target(code);
- }
+ inline void set_target(Code* code);
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
+ static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
friend class IC;
};
-enum KeyedStoreCheckMap {
- kDontCheckMap,
- kCheckMap
-};
+enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
-enum KeyedStoreIncrementLength {
- kDontIncrementLength,
- kIncrementLength
-};
+enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
-class KeyedStoreIC: public StoreIC {
+class KeyedStoreIC : public StoreIC {
public:
// ExtraICState bits (building on IC)
// ExtraICState bits
- class ExtraICStateKeyedAccessStoreMode:
- public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
+ class ExtraICStateKeyedAccessStoreMode
+ : public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
static ExtraICState ComputeExtraICState(StrictMode flag,
KeyedAccessStoreMode mode) {
return StrictModeState::encode(flag) |
- ExtraICStateKeyedAccessStoreMode::encode(mode);
+ ExtraICStateKeyedAccessStoreMode::encode(mode);
}
static KeyedAccessStoreMode GetKeyedAccessStoreMode(
@@ -709,13 +545,7 @@ class KeyedStoreIC: public StoreIC {
return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
}
- // The map register isn't part of the normal call specification, but
- // ElementsTransitionAndStoreStub, used in polymorphic keyed store
- // stub implementations requires it to be initialized.
- static const Register MapRegister();
-
- KeyedStoreIC(FrameDepth depth, Isolate* isolate)
- : StoreIC(depth, isolate) {
+ KeyedStoreIC(FrameDepth depth, Isolate* isolate) : StoreIC(depth, isolate) {
DCHECK(target()->is_keyed_store_stub());
}
@@ -730,8 +560,6 @@ class KeyedStoreIC: public StoreIC {
}
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
static void GenerateSloppyArguments(MacroAssembler* masm);
@@ -747,48 +575,23 @@ class KeyedStoreIC: public StoreIC {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
}
}
- virtual Handle<Code> slow_stub() const {
- return isolate()->builtins()->KeyedStoreIC_Slow();
- }
- virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == STRICT) {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- }
Handle<Code> StoreElementStub(Handle<JSObject> receiver,
KeyedAccessStoreMode store_mode);
private:
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode());
- IC::set_target(code);
- }
+ inline void set_target(Code* code);
// Stub accessors.
- virtual Handle<Code> generic_stub() const {
- if (strict_mode() == STRICT) {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- }
-
Handle<Code> sloppy_arguments_stub() {
return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
}
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
+ static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value);
+ Handle<Object> key, Handle<Object> value);
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
@@ -797,205 +600,42 @@ class KeyedStoreIC: public StoreIC {
};
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class BinaryOpIC: public IC {
+class BinaryOpIC : public IC {
public:
- class State V8_FINAL BASE_EMBEDDED {
- public:
- State(Isolate* isolate, ExtraICState extra_ic_state);
-
- State(Isolate* isolate, Token::Value op, OverwriteMode mode)
- : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE),
- result_kind_(NONE), isolate_(isolate) {
- DCHECK_LE(FIRST_TOKEN, op);
- DCHECK_LE(op, LAST_TOKEN);
- }
-
- InlineCacheState GetICState() const {
- if (Max(left_kind_, right_kind_) == NONE) {
- return ::v8::internal::UNINITIALIZED;
- }
- if (Max(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::MEGAMORPHIC;
- }
- if (Min(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::GENERIC;
- }
- return ::v8::internal::MONOMORPHIC;
- }
-
- ExtraICState GetExtraICState() const;
-
- static void GenerateAheadOfTime(
- Isolate*, void (*Generate)(Isolate*, const State&));
-
- bool CanReuseDoubleBox() const {
- return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
- ((mode_ == OVERWRITE_LEFT &&
- left_kind_ > SMI && left_kind_ <= NUMBER) ||
- (mode_ == OVERWRITE_RIGHT &&
- right_kind_ > SMI && right_kind_ <= NUMBER));
- }
-
- // Returns true if the IC _could_ create allocation mementos.
- bool CouldCreateAllocationMementos() const {
- if (left_kind_ == STRING || right_kind_ == STRING) {
- DCHECK_EQ(Token::ADD, op_);
- return true;
- }
- return false;
- }
-
- // Returns true if the IC _should_ create allocation mementos.
- bool ShouldCreateAllocationMementos() const {
- return FLAG_allocation_site_pretenuring &&
- CouldCreateAllocationMementos();
- }
-
- bool HasSideEffects() const {
- return Max(left_kind_, right_kind_) == GENERIC;
- }
-
- // Returns true if the IC should enable the inline smi code (i.e. if either
- // parameter may be a smi).
- bool UseInlinedSmiCode() const {
- return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
- }
-
- static const int FIRST_TOKEN = Token::BIT_OR;
- static const int LAST_TOKEN = Token::MOD;
-
- Token::Value op() const { return op_; }
- OverwriteMode mode() const { return mode_; }
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-
- Type* GetLeftType(Zone* zone) const {
- return KindToType(left_kind_, zone);
- }
- Type* GetRightType(Zone* zone) const {
- return KindToType(right_kind_, zone);
- }
- Type* GetResultType(Zone* zone) const;
-
- void Update(Handle<Object> left,
- Handle<Object> right,
- Handle<Object> result);
-
- Isolate* isolate() const { return isolate_; }
-
- private:
- friend OStream& operator<<(OStream& os, const BinaryOpIC::State& s);
-
- enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
-
- Kind UpdateKind(Handle<Object> object, Kind kind) const;
-
- static const char* KindToString(Kind kind);
- static Type* KindToType(Kind kind, Zone* zone);
- static bool KindMaybeSmi(Kind kind) {
- return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
- }
-
- // We truncate the last bit of the token.
- STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
- class OpField: public BitField<int, 0, 4> {};
- class OverwriteModeField: public BitField<OverwriteMode, 4, 2> {};
- class ResultKindField: public BitField<Kind, 6, 3> {};
- class LeftKindField: public BitField<Kind, 9, 3> {};
- // When fixed right arg is set, we don't need to store the right kind.
- // Thus the two fields can overlap.
- class HasFixedRightArgField: public BitField<bool, 12, 1> {};
- class FixedRightArgValueField: public BitField<int, 13, 4> {};
- class RightKindField: public BitField<Kind, 13, 3> {};
-
- Token::Value op_;
- OverwriteMode mode_;
- Kind left_kind_;
- Kind right_kind_;
- Kind result_kind_;
- Maybe<int> fixed_right_arg_;
- Isolate* isolate_;
- };
-
- explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
+ explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
- Handle<Object> right) V8_WARN_UNUSED_RESULT;
+ Handle<Object> right) WARN_UNUSED_RESULT;
};
-OStream& operator<<(OStream& os, const BinaryOpIC::State& s);
-
-
-class CompareIC: public IC {
+class CompareIC : public IC {
public:
- // The type/state lattice is defined by the following inequations:
- // UNINITIALIZED < ...
- // ... < GENERIC
- // SMI < NUMBER
- // INTERNALIZED_STRING < STRING
- // KNOWN_OBJECT < OBJECT
- enum State {
- UNINITIALIZED,
- SMI,
- NUMBER,
- STRING,
- INTERNALIZED_STRING,
- UNIQUE_NAME, // Symbol or InternalizedString
- OBJECT, // JSObject
- KNOWN_OBJECT, // JSObject with specific map (faster check)
- GENERIC
- };
-
- static State NewInputState(State old_state, Handle<Object> value);
-
- static Type* StateToType(Zone* zone,
- State state,
- Handle<Map> map = Handle<Map>());
-
- static void StubInfoToType(uint32_t stub_key, Type** left_type,
- Type** right_type, Type** overall_type,
- Handle<Map> map, Zone* zone);
-
CompareIC(Isolate* isolate, Token::Value op)
- : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
+ : IC(EXTRA_CALL_FRAME, isolate), op_(op) {}
// Update the inline cache for the given operands.
Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
-
- // Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
-
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
- static const char* GetStateName(State state);
+ // Factory method for getting an uninitialized compare stub.
+ static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
private:
static bool HasInlinedSmiCode(Address address);
- State TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y);
-
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
+ static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
Token::Value op_;
@@ -1004,7 +644,7 @@ class CompareIC: public IC {
};
-class CompareNilIC: public IC {
+class CompareNilIC : public IC {
public:
explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
@@ -1012,8 +652,7 @@ class CompareNilIC: public IC {
static Handle<Code> GetUninitialized();
- static void Clear(Address address,
- Code* target,
+ static void Clear(Address address, Code* target,
ConstantPoolArray* constant_pool);
static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
@@ -1021,9 +660,9 @@ class CompareNilIC: public IC {
};
-class ToBooleanIC: public IC {
+class ToBooleanIC : public IC {
public:
- explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
+ explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
Handle<Object> ToBoolean(Handle<Object> object);
};
@@ -1042,8 +681,18 @@ DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
-
-
-} } // namespace v8::internal
+DECLARE_RUNTIME_FUNCTION(VectorLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(VectorKeyedLoadIC_MissFromStubFailure);
+
+// Support functions for callbacks handlers.
+DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
+
+// Support functions for interceptor handlers.
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor);
+}
+} // namespace v8::internal
#endif // V8_IC_H_
diff --git a/deps/v8/src/ic/mips/OWNERS b/deps/v8/src/ic/mips/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/ic/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
new file mode 100644
index 0000000000..dce7602ae0
--- /dev/null
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, a3, a0, t0, t1};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(a3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, a3, t0, t1};
+ return registers;
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 8f6af7a7fd..5b4555fa84 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,9 +6,9 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
@@ -16,81 +16,79 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ sll(offset_scratch, offset, 1);
- __ Addu(offset_scratch, offset_scratch, offset);
-
- // Calculate the base address of the entry.
- __ li(base_addr, Operand(key_offset));
- __ sll(at, offset_scratch, kPointerSizeLog2);
- __ Addu(base_addr, base_addr, at);
-
- // Check that the key in the entry matches the name.
- __ lw(at, MemOperand(base_addr, 0));
- __ Branch(&miss, ne, name, Operand(at));
-
- // Check the map matches.
- __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Branch(&miss, ne, at, Operand(scratch2));
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
- __ Branch(&miss, ne, flags_reg, Operand(flags));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
}
-#endif
- // Jump to the first instruction in the code stub.
- __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(v0);
- // Miss: fall through.
- __ bind(&miss);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
}
@@ -106,7 +104,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
Label done;
const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
Register map = scratch1;
@@ -132,110 +130,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
-
- // Check register validity.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Addu(scratch, scratch, at);
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ srl(scratch, scratch, kCacheIndexShift);
- __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- __ And(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ srl(at, name, kCacheIndexShift);
- __ Subu(scratch, scratch, at);
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ And(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
Isolate* isolate = masm->isolate();
@@ -268,6 +169,9 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
}
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
@@ -280,10 +184,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
}
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
@@ -300,12 +202,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
NamedLoadHandlerCompiler::kInterceptorArgsLength);
@@ -323,10 +221,10 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
+ Register arg = values[argc - 1 - i];
DCHECK(!receiver.is(arg));
DCHECK(!scratch_in.is(arg));
- __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
+ __ sw(arg, MemOperand(sp, (argc - 1 - i) * kPointerSize)); // Push arg.
}
DCHECK(optimization.is_simple_api_call());
@@ -338,16 +236,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ li(holder, api_holder);
- break;
+ break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
@@ -385,9 +282,29 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -459,8 +376,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ jmp(&do_store);
__ bind(&heap_number);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+ DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -479,9 +396,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ li(a2, Operand(transition));
__ Push(a2, a0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ isolate()),
+ 3, 1);
return;
}
@@ -490,13 +407,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
@@ -515,8 +427,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
@@ -531,21 +443,15 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
- __ lw(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ lw(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
@@ -557,14 +463,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -576,7 +477,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -611,8 +512,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
@@ -645,10 +546,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
+ NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -671,9 +572,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -762,8 +662,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(receiver());
if (heap()->InNewSpace(callback->data())) {
__ li(scratch3(), callback);
- __ lw(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
+ __ lw(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
} else {
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
@@ -772,8 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
__ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ li(scratch4(),
- Operand(ExternalReference::isolate_address(isolate())));
+ __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
__ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
__ sw(reg, MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
@@ -781,7 +680,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
// Abi for CallApiGetter.
- Register getter_address_reg = a2;
+ Register getter_address_reg = ApiGetterDescriptor::function_address();
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@@ -794,88 +693,74 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
}
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(receiver(), holder_reg, this->name());
+ } else {
+ __ Pop(holder_reg, this->name());
}
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ // Leave the internal frame.
}
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -885,7 +770,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Register holder_reg = Frontend(receiver(), name);
__ Push(receiver(), holder_reg); // Receiver.
- __ li(at, Operand(callback)); // Callback info.
+ __ li(at, Operand(callback)); // Callback info.
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
@@ -900,54 +785,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ lw(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(v0);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ Push(receiver(), this->name(), value());
@@ -962,72 +799,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
}
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, a3, a0, t0, t1 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(a3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, a3, t0, t1 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ lw(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
}
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
@@ -1035,7 +811,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- Register result = StoreIC::ValueRegister();
+ Register result = StoreDescriptor::ValueRegister();
__ li(result, Operand(cell));
__ lw(result, FieldMemOperand(result, Cell::kValueOffset));
@@ -1057,130 +833,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ Branch(&miss, ne, this->name(), Operand(name));
- }
- }
-
- Label number_case;
- Register match = scratch2();
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
-
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- // Separate compare from branch, to provide path for above JumpIfSmi().
- __ Subu(match, map_reg, Operand(map));
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, match, Operand(zero_reg));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
- scratch1(), Operand(receiver_maps->at(i)));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
- __ li(transition_map(), Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in ra.
- Label slow, miss;
-
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(a1));
- DCHECK(key.is(a2));
-
- __ UntagAndJumpIfNotSmi(t2, key, &miss);
- __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, t0, key, v0, t2, a3, t1);
- __ Ret();
-
- // Slow case, key and receiver still unmodified.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
-
-
-#undef __
-
-} } // namespace v8::internal
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
new file mode 100644
index 0000000000..c1e67f9ab6
--- /dev/null
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -0,0 +1,131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+ __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ Branch(&miss, ne, this->name(), Operand(name));
+ }
+ }
+
+ Label number_case;
+ Register match = scratch2();
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match.
+ // Separate compare from branch, to provide path for above JumpIfSmi().
+ __ Subu(match, map_reg, Operand(map));
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
+ Operand(zero_reg));
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; ++i) {
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
+ Operand(receiver_maps->at(i)));
+ } else {
+ Label next_map;
+ __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+ __ li(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(a0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 1f4a7bea2d..d97a6ba066 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -3,16 +3,14 @@
// found in the LICENSE file.
-
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -25,8 +23,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -51,12 +48,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
@@ -65,23 +59,18 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at,
- scratch1,
+ __ And(at, scratch1,
Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
__ Branch(miss, ne, at, Operand(zero_reg));
@@ -104,12 +93,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
@@ -118,23 +104,20 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
__ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
__ Branch(miss, ne, at, Operand(zero_reg));
@@ -146,19 +129,17 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
+ Register receiver, Register map,
Register scratch,
- int interceptor_bit,
- Label* slow) {
+ int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
@@ -180,14 +161,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch1, Register scratch2,
+ Register result, Label* not_fast_array,
Label* out_of_range) {
// Register use:
//
@@ -244,12 +221,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
@@ -275,34 +249,17 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, a3, t0, t1, t2);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ lw(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), v0, a3, t0);
+ __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), v0, a3, t0);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
@@ -321,8 +278,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
+ __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
@@ -333,21 +290,17 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
+ __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
+static MemOperand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
Heap* heap = masm->isolate()->heap();
// Check that the receiver is a JSObject. Because of the map check
@@ -365,11 +318,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map.
Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1,
- scratch2,
- arguments_map,
- slow_case,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -413,54 +362,22 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- __ CheckMap(backing_store,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- slow_case,
+ __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
DONT_DO_SMI_CHECK);
__ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
__ li(scratch, Operand(kPointerSize >> 1));
__ Mul(scratch, key, scratch);
- __ Addu(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(scratch, backing_store, scratch);
return MemOperand(scratch);
}
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in ra.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(a1));
- DCHECK(key.is(a2));
-
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, a0, a3, t0, &notin, &slow);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, mapped_location);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a0.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
- __ lw(a0, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a0, Operand(a3));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(value.is(a0));
Label slow, notin;
@@ -470,8 +387,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ sw(value, mapped_location);
__ mov(t5, value);
DCHECK_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t5,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, value); // (In delay slot) return the value stored in v0.
__ bind(&notin);
@@ -482,8 +399,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ sw(value, unmapped_location);
__ mov(t5, value);
DCHECK_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t5,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
@@ -497,7 +414,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@@ -507,37 +424,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return a1; }
-const Register LoadIC::NameRegister() { return a2; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return a0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return a3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return a1; }
-const Register StoreIC::NameRegister() { return a2; }
-const Register StoreIC::ValueRegister() { return a0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return a3;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -548,8 +438,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(a2));
DCHECK(receiver.is(a1));
@@ -561,14 +451,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+ Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(
- masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
__ Ret();
@@ -587,17 +476,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1,
- t0,
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
a3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+ Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -671,10 +558,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ sll(at, t2, kPointerSizeLog2);
__ addu(at, receiver, at);
__ lw(v0, MemOperand(at));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1,
- t0,
- a3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ t0, a3);
__ Ret();
// Load property array property.
@@ -684,10 +569,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ sll(v0, t1, kPointerSizeLog2);
__ Addu(v0, v0, receiver);
__ lw(v0, MemOperand(v0));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1,
- t0,
- a3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ t0, a3);
__ Ret();
@@ -700,9 +583,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1,
- t0,
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
a3);
__ Ret();
@@ -717,16 +598,13 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is in ra.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register scratch = a3;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -742,30 +620,11 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
- __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(a0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
@@ -829,13 +688,8 @@ static void KeyedStoreGenerateGenericHelper(
__ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
__ bind(fast_double);
@@ -849,9 +703,8 @@ static void KeyedStoreGenerateGenericHelper(
// HOLECHECK: guards "A[i] double hole?"
// We have to see if the double version of the hole is present. If so
// go to the runtime.
- __ Addu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
- - kHeapObjectTag));
+ __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
+ kHoleNanUpper32Offset - kHeapObjectTag));
__ sll(at, key, kPointerSizeLog2);
__ addu(address, address, at);
__ lw(scratch_value, MemOperand(address));
@@ -861,13 +714,10 @@ static void KeyedStoreGenerateGenericHelper(
slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
+ __ StoreNumberToDoubleElements(value, key,
elements, // Overwritten.
a3, // Scratch regs...
- t0,
- t1,
- &transition_double_elements);
+ t0, t1, &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
@@ -883,25 +733,19 @@ static void KeyedStoreGenerateGenericHelper(
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- t0,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, t0, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -912,11 +756,8 @@ static void KeyedStoreGenerateGenericHelper(
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, t0, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -938,9 +779,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label array, extra, check_if_double_array;
// Register usage.
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
DCHECK(value.is(a0));
Register receiver_map = a3;
Register elements_map = t2;
@@ -956,8 +797,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
- 1 << Map::kIsObserved));
+ __ And(t0, t0,
+ Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -977,7 +818,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// a0: value.
// a1: key.
// a2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -991,8 +832,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(
- &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&check_if_double_array, ne, elements_map,
+ Heap::kFixedArrayMapRootIndex);
__ jmp(&fast_object_grow);
@@ -1010,58 +851,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
+ KeyedStoreGenerateGenericHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in ra.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = a3;
- Register scratch2 = t0;
- DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
- DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
-
- // Get the map of the receiver.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
- __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor));
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(ExternalReference(
- IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
+ &slow, kDontCheckMap, kIncrementLength, value,
+ key, receiver, receiver_map, elements_map,
+ elements);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1069,44 +872,18 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- // We can't use MultiPush as the order of the registers is important.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
- DCHECK(ValueRegister().is(a0));
+ DCHECK(StoreDescriptor::ValueRegister().is(a0));
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+ name, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1114,19 +891,20 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
- masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Register dictionary = a3;
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
@@ -1145,18 +923,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ li(a0, Operand(Smi::FromInt(strict_mode)));
- __ Push(a0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
#undef __
@@ -1189,7 +955,7 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
return Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}
@@ -1216,8 +982,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
- address, andi_instruction_address, delta);
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
+ andi_instruction_address, delta);
}
Address patch_address =
@@ -1252,8 +1018,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.ChangeBranchCondition(eq);
}
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
new file mode 100644
index 0000000000..e538712d3f
--- /dev/null
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -0,0 +1,169 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register receiver, Register name,
+ // Number of the cache entry, not scaled.
+ Register offset, Register scratch, Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ DCHECK(value_off_addr > key_off_addr);
+ DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+ DCHECK(map_off_addr > key_off_addr);
+ DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ sll(offset_scratch, offset, 1);
+ __ Addu(offset_scratch, offset_scratch, offset);
+
+ // Calculate the base address of the entry.
+ __ li(base_addr, Operand(key_offset));
+ __ sll(at, offset_scratch, kPointerSizeLog2);
+ __ Addu(base_addr, base_addr, at);
+
+ // Check that the key in the entry matches the name.
+ __ lw(at, MemOperand(base_addr, 0));
+ __ Branch(&miss, ne, name, Operand(at));
+
+ // Check the map matches.
+ __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, at, Operand(scratch2));
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+ __ Branch(&miss, ne, flags_reg, Operand(flags));
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Jump to the first instruction in the code stub.
+ __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ DCHECK(sizeof(Entry) == 12);
+
+ // Make sure the flags does not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+ DCHECK(!extra2.is(receiver));
+ DCHECK(!extra2.is(name));
+ DCHECK(!extra2.is(scratch));
+ DCHECK(!extra2.is(extra));
+
+ // Check register validity.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Addu(scratch, scratch, at);
+ uint32_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ srl(scratch, scratch, kCacheIndexShift);
+ __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+ __ And(scratch, scratch, Operand(mask));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ srl(at, name, kCacheIndexShift);
+ __ Subu(scratch, scratch, at);
+ uint32_t mask2 = kSecondaryTableSize - 1;
+ __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+ __ And(scratch, scratch, Operand(mask2));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips64/OWNERS b/deps/v8/src/ic/mips64/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/ic/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
new file mode 100644
index 0000000000..5e3cfc52fd
--- /dev/null
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, a3, a0, a4, a5};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(a3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, a3, a4, a5};
+ return registers;
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index fde21a9d12..f44226f772 100644
--- a/deps/v8/src/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,9 +6,9 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
@@ -16,81 +16,79 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
- uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
- uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ dsll(offset_scratch, offset, 1);
- __ Daddu(offset_scratch, offset_scratch, offset);
-
- // Calculate the base address of the entry.
- __ li(base_addr, Operand(key_offset));
- __ dsll(at, offset_scratch, kPointerSizeLog2);
- __ Daddu(base_addr, base_addr, at);
-
- // Check that the key in the entry matches the name.
- __ ld(at, MemOperand(base_addr, 0));
- __ Branch(&miss, ne, name, Operand(at));
-
- // Check the map matches.
- __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Branch(&miss, ne, at, Operand(scratch2));
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
- __ Branch(&miss, ne, flags_reg, Operand(flags));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ld(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ld(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
}
-#endif
- // Jump to the first instruction in the code stub.
- __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(v0);
- // Miss: fall through.
- __ bind(&miss);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
}
@@ -132,111 +130,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- // DCHECK(sizeof(Entry) == 12);
- // DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
-
- // Check register validity.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Daddu(scratch, scratch, at);
- uint64_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ dsrl(scratch, scratch, kCacheIndexShift);
- __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- __ And(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ dsrl(at, name, kCacheIndexShift);
- __ Dsubu(scratch, scratch, at);
- uint64_t mask2 = kSecondaryTableSize - 1;
- __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ And(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
Isolate* isolate = masm->isolate();
@@ -269,6 +169,9 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
}
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
@@ -281,10 +184,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
}
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
@@ -301,12 +202,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
NamedLoadHandlerCompiler::kInterceptorArgsLength);
@@ -324,10 +221,10 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ sd(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
+ Register arg = values[argc - 1 - i];
DCHECK(!receiver.is(arg));
DCHECK(!scratch_in.is(arg));
- __ sd(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
+ __ sd(arg, MemOperand(sp, (argc - 1 - i) * kPointerSize)); // Push arg.
}
DCHECK(optimization.is_simple_api_call());
@@ -339,16 +236,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ li(holder, api_holder);
- break;
+ break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
@@ -377,10 +273,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref =
- ExternalReference(&fun,
- type,
- masm->isolate());
+ ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
__ li(api_function_address, Operand(ref));
// Jump to stub.
@@ -389,9 +282,29 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -463,8 +376,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ jmp(&do_store);
__ bind(&heap_number);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+ DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -483,9 +396,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ li(a2, Operand(transition));
__ Push(a2, a0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ isolate()),
+ 3, 1);
return;
}
@@ -494,13 +407,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ sd(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
@@ -519,8 +427,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
@@ -535,21 +443,15 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
- __ ld(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ ld(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (representation.IsDouble()) {
__ sd(storage_reg, FieldMemOperand(scratch1, offset));
} else {
@@ -561,14 +463,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -580,7 +477,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -615,8 +512,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
@@ -649,10 +546,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
+ NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -681,9 +578,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -766,8 +662,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(receiver());
if (heap()->InNewSpace(callback->data())) {
__ li(scratch3(), callback);
- __ ld(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
+ __ ld(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
} else {
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
@@ -776,8 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ sd(scratch3(), MemOperand(sp, 4 * kPointerSize));
__ sd(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ li(scratch4(),
- Operand(ExternalReference::isolate_address(isolate())));
+ __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
__ sd(scratch4(), MemOperand(sp, 2 * kPointerSize));
__ sd(reg, MemOperand(sp, 1 * kPointerSize));
__ sd(name(), MemOperand(sp, 0 * kPointerSize));
@@ -785,7 +680,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
// Abi for CallApiGetter.
- Register getter_address_reg = a2;
+ Register getter_address_reg = ApiGetterDescriptor::function_address();
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@@ -798,88 +693,74 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
}
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(receiver(), holder_reg, this->name());
+ } else {
+ __ Pop(holder_reg, this->name());
}
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ // Leave the internal frame.
}
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -889,7 +770,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Register holder_reg = Frontend(receiver(), name);
__ Push(receiver(), holder_reg); // Receiver.
- __ li(at, Operand(callback)); // Callback info.
+ __ li(at, Operand(callback)); // Callback info.
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
@@ -904,54 +785,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ld(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(v0);
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ Push(receiver(), this->name(), value());
@@ -966,72 +799,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
}
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, a3, a0, a4, a5 };
- return registers;
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
}
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(a3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, a3, a4, a5 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ld(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
@@ -1039,7 +811,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- Register result = StoreIC::ValueRegister();
+ Register result = StoreDescriptor::ValueRegister();
__ li(result, Operand(cell));
__ ld(result, FieldMemOperand(result, Cell::kValueOffset));
@@ -1061,131 +833,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ Branch(&miss, ne, this->name(), Operand(name));
- }
- }
-
- Label number_case;
- Register match = scratch2();
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
-
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- // Separate compare from branch, to provide path for above JumpIfSmi().
- __ Dsubu(match, map_reg, Operand(map));
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, match, Operand(zero_reg));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
- scratch1(), Operand(receiver_maps->at(i)));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
- __ li(transition_map(), Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in ra
- Label slow, miss;
-
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(a1));
- DCHECK(key.is(a2));
-
- __ UntagAndJumpIfNotSmi(a6, key, &miss);
- __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- DCHECK(kSmiTagSize + kSmiShiftSize == 32);
- __ LoadFromNumberDictionary(&slow, a4, key, v0, a6, a3, a5);
- __ Ret();
-
- // Slow case, key and receiver still unmodified.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
-
-
-#undef __
-
-} } // namespace v8::internal
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
new file mode 100644
index 0000000000..796ed87f7e
--- /dev/null
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -0,0 +1,131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+ __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ Branch(&miss, ne, this->name(), Operand(name));
+ }
+ }
+
+ Label number_case;
+ Register match = scratch2();
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match.
+ // Separate compare from branch, to provide path for above JumpIfSmi().
+ __ Dsubu(match, map_reg, Operand(map));
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
+ Operand(zero_reg));
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; ++i) {
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
+ Operand(receiver_maps->at(i)));
+ } else {
+ Label next_map;
+ __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+ __ li(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(a0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 5187342329..a5d9fe78ff 100644
--- a/deps/v8/src/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -3,16 +3,14 @@
// found in the LICENSE file.
-
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS64
-#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -25,8 +23,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -51,12 +48,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
@@ -65,23 +59,18 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at,
- scratch1,
+ __ And(at, scratch1,
Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
__ Branch(miss, ne, at, Operand(zero_reg));
@@ -104,12 +93,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
@@ -118,18 +104,14 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
@@ -146,19 +128,17 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
+ Register receiver, Register map,
Register scratch,
- int interceptor_bit,
- Label* slow) {
+ int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
@@ -180,14 +160,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch1, Register scratch2,
+ Register result, Label* not_fast_array,
Label* out_of_range) {
// Register use:
//
@@ -227,7 +203,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
__ Daddu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ SmiScale(at, key, kPointerSizeLog2);
@@ -244,12 +220,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
@@ -275,33 +248,16 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, a3, a4, a5, a6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ ld(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), v0, a3, a4);
+ __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), v0, a3, a4);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
@@ -320,8 +276,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
+ __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
@@ -332,21 +288,17 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
+ __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
+static MemOperand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
Heap* heap = masm->isolate()->heap();
// Check that the receiver is a JSObject. Because of the map check
@@ -364,11 +316,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map.
Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1,
- scratch2,
- arguments_map,
- slow_case,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -412,54 +360,22 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- __ CheckMap(backing_store,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- slow_case,
+ __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
DONT_DO_SMI_CHECK);
__ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
__ SmiUntag(scratch, key);
__ dsll(scratch, scratch, kPointerSizeLog2);
- __ Daddu(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Daddu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Daddu(scratch, backing_store, scratch);
return MemOperand(scratch);
}
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in ra.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(a1));
- DCHECK(key.is(a2));
-
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, a0, a3, a4, &notin, &slow);
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, mapped_location);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a2.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
- __ ld(a0, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a0, Operand(a3));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(value.is(a0));
Label slow, notin;
@@ -469,8 +385,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ sd(value, mapped_location);
__ mov(t1, value);
DCHECK_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a3, mapped_location.rm(), t1, kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, value); // (In delay slot) return the value stored in v0.
__ bind(&notin);
@@ -481,8 +397,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ sd(value, unmapped_location);
__ mov(t1, value);
DCHECK_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a3, unmapped_location.rm(), t1, kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
@@ -496,7 +412,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@@ -506,37 +422,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return a1; }
-const Register LoadIC::NameRegister() { return a2; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return a0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return a3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return a1; }
-const Register StoreIC::NameRegister() { return a2; }
-const Register StoreIC::ValueRegister() { return a0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return a3;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
- __ Push(ReceiverRegister(), NameRegister());
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -547,8 +436,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(a2));
DCHECK(receiver.is(a1));
@@ -560,14 +449,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+ Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(
- masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
__ Ret();
@@ -586,17 +474,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1,
- a4,
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
a3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+ Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -677,10 +563,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ dsll(at, a6, kPointerSizeLog2);
__ daddu(at, receiver, at);
__ ld(v0, MemOperand(at));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1,
- a4,
- a3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ a4, a3);
__ Ret();
// Load property array property.
@@ -690,10 +574,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ dsll(v0, a5, kPointerSizeLog2);
__ Daddu(v0, v0, a1);
__ ld(v0, MemOperand(v0));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1,
- a4,
- a3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ a4, a3);
__ Ret();
@@ -706,9 +588,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1,
- a4,
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
a3);
__ Ret();
@@ -723,16 +603,13 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is in ra.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register scratch = a3;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -748,31 +625,11 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(a0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
@@ -814,7 +671,7 @@ static void KeyedStoreGenerateGenericHelper(
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ Daddu(address, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiScale(scratch_value, key, kPointerSizeLog2);
__ Daddu(address, address, scratch_value);
__ sd(value, MemOperand(address));
@@ -833,19 +690,14 @@ static void KeyedStoreGenerateGenericHelper(
__ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Daddu(address, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiScale(scratch_value, key, kPointerSizeLog2);
__ Daddu(address, address, scratch_value);
__ sd(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
__ bind(fast_double);
@@ -860,8 +712,8 @@ static void KeyedStoreGenerateGenericHelper(
// We have to see if the double version of the hole is present. If so
// go to the runtime.
__ Daddu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
- - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
+ kHeapObjectTag));
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
__ lw(scratch_value, MemOperand(address));
@@ -871,13 +723,10 @@ static void KeyedStoreGenerateGenericHelper(
slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
+ __ StoreNumberToDoubleElements(value, key,
elements, // Overwritten.
a3, // Scratch regs...
- a4,
- a5,
- &transition_double_elements);
+ a4, a5, &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
@@ -893,25 +742,19 @@ static void KeyedStoreGenerateGenericHelper(
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- a4,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
__ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- a4,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, a4, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -922,11 +765,8 @@ static void KeyedStoreGenerateGenericHelper(
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- a4,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, a4, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -948,9 +788,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label array, extra, check_if_double_array;
// Register usage.
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
DCHECK(value.is(a0));
Register receiver_map = a3;
Register elements_map = a6;
@@ -966,8 +806,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded |
- 1 << Map::kIsObserved));
+ __ And(a4, a4,
+ Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ Branch(&slow, ne, a4, Operand(zero_reg));
// Check if the object is a JS array or not.
__ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -987,7 +827,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// a0: value.
// a1: key.
// a2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1001,8 +841,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(a4));
__ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(
- &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&check_if_double_array, ne, elements_map,
+ Heap::kFixedArrayMapRootIndex);
__ jmp(&fast_object_grow);
@@ -1020,58 +860,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(a4));
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
+ KeyedStoreGenerateGenericHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in ra.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = a3;
- Register scratch2 = a4;
- DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
- DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ And(a4, key, Operand(kSmiTagMask | kSmiSignMask));
- __ Branch(&slow, ne, a4, Operand(zero_reg));
-
- // Get the map of the receiver.
- __ ld(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
- __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor));
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(ExternalReference(
- IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
+ &slow, kDontCheckMap, kIncrementLength, value,
+ key, receiver, receiver_map, elements_map,
+ elements);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1079,43 +881,18 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- // We can't use MultiPush as the order of the registers is important.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
- DCHECK(ValueRegister().is(a0));
+ DCHECK(StoreDescriptor::ValueRegister().is(a0));
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+ name, a3, a4, a5, a6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1123,19 +900,20 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
- masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Register dictionary = a3;
DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));
@@ -1152,18 +930,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ li(a0, Operand(Smi::FromInt(strict_mode)));
- __ Push(a0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
#undef __
@@ -1196,7 +962,7 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
return Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}
@@ -1223,8 +989,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
- address, andi_instruction_address, delta);
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
+ andi_instruction_address, delta);
}
Address patch_address =
@@ -1259,8 +1025,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.ChangeBranchCondition(eq);
}
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
new file mode 100644
index 0000000000..272e5bea9b
--- /dev/null
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -0,0 +1,170 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register receiver, Register name,
+ // Number of the cache entry, not scaled.
+ Register offset, Register scratch, Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
+ uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
+ uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ DCHECK(value_off_addr > key_off_addr);
+ DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+ DCHECK(map_off_addr > key_off_addr);
+ DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ dsll(offset_scratch, offset, 1);
+ __ Daddu(offset_scratch, offset_scratch, offset);
+
+ // Calculate the base address of the entry.
+ __ li(base_addr, Operand(key_offset));
+ __ dsll(at, offset_scratch, kPointerSizeLog2);
+ __ Daddu(base_addr, base_addr, at);
+
+ // Check that the key in the entry matches the name.
+ __ ld(at, MemOperand(base_addr, 0));
+ __ Branch(&miss, ne, name, Operand(at));
+
+ // Check the map matches.
+ __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, at, Operand(scratch2));
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+ __ Branch(&miss, ne, flags_reg, Operand(flags));
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Jump to the first instruction in the code stub.
+ __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ // DCHECK(sizeof(Entry) == 12);
+ // DCHECK(sizeof(Entry) == 3 * kPointerSize);
+
+ // Make sure the flags does not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+ DCHECK(!extra2.is(receiver));
+ DCHECK(!extra2.is(name));
+ DCHECK(!extra2.is(scratch));
+ DCHECK(!extra2.is(extra));
+
+ // Check register validity.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Daddu(scratch, scratch, at);
+ uint64_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ dsrl(scratch, scratch, kCacheIndexShift);
+ __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+ __ And(scratch, scratch, Operand(mask));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ dsrl(at, name, kCacheIndexShift);
+ __ Dsubu(scratch, scratch, at);
+ uint64_t mask2 = kSecondaryTableSize - 1;
+ __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+ __ And(scratch, scratch, Operand(mask2));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
new file mode 100644
index 0000000000..35a4acf8cc
--- /dev/null
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -0,0 +1,147 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/ic/stub-cache.h"
+#include "src/type-info.h"
+
+namespace v8 {
+namespace internal {
+
+
+StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {}
+
+
+void StubCache::Initialize() {
+ DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
+ DCHECK(base::bits::IsPowerOfTwo32(kSecondaryTableSize));
+ Clear();
+}
+
+
+static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
+ Code::Flags flags) {
+ flags = Code::RemoveTypeAndHolderFromFlags(flags);
+
+ // Validate that the name does not move on scavenge, and that we
+ // can use identity checks instead of structural equality checks.
+ DCHECK(!name->GetHeap()->InNewSpace(name));
+ DCHECK(name->IsUniqueName());
+
+ // The state bits are not important to the hash function because the stub
+ // cache only contains handlers. Make sure that the bits are the least
+ // significant so they will be the ones masked out.
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
+ STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
+
+ // Make sure that the code type and cache holder are not included in the hash.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+ DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
+
+ return flags;
+}
+
+
+Code* StubCache::Set(Name* name, Map* map, Code* code) {
+ Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
+
+ // Compute the primary entry.
+ int primary_offset = PrimaryOffset(name, flags, map);
+ Entry* primary = entry(primary_, primary_offset);
+ Code* old_code = primary->value;
+
+ // If the primary entry has useful data in it, we retire it to the
+ // secondary cache before overwriting it.
+ if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+ Map* old_map = primary->map;
+ Code::Flags old_flags =
+ Code::RemoveTypeAndHolderFromFlags(old_code->flags());
+ int seed = PrimaryOffset(primary->key, old_flags, old_map);
+ int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
+ Entry* secondary = entry(secondary_, secondary_offset);
+ *secondary = *primary;
+ }
+
+ // Update primary cache.
+ primary->key = name;
+ primary->value = code;
+ primary->map = map;
+ isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
+ return code;
+}
+
+
+Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
+ flags = CommonStubCacheChecks(name, map, flags);
+ int primary_offset = PrimaryOffset(name, flags, map);
+ Entry* primary = entry(primary_, primary_offset);
+ if (primary->key == name && primary->map == map) {
+ return primary->value;
+ }
+ int secondary_offset = SecondaryOffset(name, flags, primary_offset);
+ Entry* secondary = entry(secondary_, secondary_offset);
+ if (secondary->key == name && secondary->map == map) {
+ return secondary->value;
+ }
+ return NULL;
+}
+
+
+void StubCache::Clear() {
+ Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+ for (int i = 0; i < kPrimaryTableSize; i++) {
+ primary_[i].key = isolate()->heap()->empty_string();
+ primary_[i].map = NULL;
+ primary_[i].value = empty;
+ }
+ for (int j = 0; j < kSecondaryTableSize; j++) {
+ secondary_[j].key = isolate()->heap()->empty_string();
+ secondary_[j].map = NULL;
+ secondary_[j].value = empty;
+ }
+}
+
+
+void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
+ Code::Flags flags,
+ Handle<Context> native_context,
+ Zone* zone) {
+ for (int i = 0; i < kPrimaryTableSize; i++) {
+ if (primary_[i].key == *name) {
+ Map* map = primary_[i].map;
+ // Map can be NULL, if the stub is constant function call
+ // with a primitive receiver.
+ if (map == NULL) continue;
+
+ int offset = PrimaryOffset(*name, flags, map);
+ if (entry(primary_, offset) == &primary_[i] &&
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->AddMapIfMissing(Handle<Map>(map), zone);
+ }
+ }
+ }
+
+ for (int i = 0; i < kSecondaryTableSize; i++) {
+ if (secondary_[i].key == *name) {
+ Map* map = secondary_[i].map;
+ // Map can be NULL, if the stub is constant function call
+ // with a primitive receiver.
+ if (map == NULL) continue;
+
+ // Lookup in primary table and skip duplicates.
+ int primary_offset = PrimaryOffset(*name, flags, map);
+
+ // Lookup in secondary table and add matches.
+ int offset = SecondaryOffset(*name, flags, primary_offset);
+ if (entry(secondary_, offset) == &secondary_[i] &&
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->AddMapIfMissing(Handle<Map>(map), zone);
+ }
+ }
+ }
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
new file mode 100644
index 0000000000..7aee6f16ad
--- /dev/null
+++ b/deps/v8/src/ic/stub-cache.h
@@ -0,0 +1,171 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STUB_CACHE_H_
+#define V8_STUB_CACHE_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// The stub cache is used for megamorphic property accesses.
+// It maps (map, name, type) to property access handlers. The cache does not
+// need explicit invalidation when a prototype chain is modified, since the
+// handlers verify the chain.
+
+
+class SCTableReference {
+ public:
+ Address address() const { return address_; }
+
+ private:
+ explicit SCTableReference(Address address) : address_(address) {}
+
+ Address address_;
+
+ friend class StubCache;
+};
+
+
+class StubCache {
+ public:
+ struct Entry {
+ Name* key;
+ Code* value;
+ Map* map;
+ };
+
+ void Initialize();
+ // Access cache for entry hash(name, map).
+ Code* Set(Name* name, Map* map, Code* code);
+ Code* Get(Name* name, Map* map, Code::Flags flags);
+ // Clear the lookup table (@ mark compact collection).
+ void Clear();
+ // Collect all maps that match the name and flags.
+ void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
+ Code::Flags flags, Handle<Context> native_context,
+ Zone* zone);
+ // Generate code for probing the stub cache table.
+ // Arguments extra, extra2 and extra3 may be used to pass additional scratch
+ // registers. Set to no_reg if not needed.
+ // If leave_frame is true, then exit a frame before the tail call.
+ void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
+ Register receiver, Register name, Register scratch,
+ Register extra, Register extra2 = no_reg,
+ Register extra3 = no_reg);
+
+ enum Table { kPrimary, kSecondary };
+
+ SCTableReference key_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->key));
+ }
+
+ SCTableReference map_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->map));
+ }
+
+ SCTableReference value_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->value));
+ }
+
+ StubCache::Entry* first_entry(StubCache::Table table) {
+ switch (table) {
+ case StubCache::kPrimary:
+ return StubCache::primary_;
+ case StubCache::kSecondary:
+ return StubCache::secondary_;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+ Isolate* isolate() { return isolate_; }
+
+ // Setting the entry size such that the index is shifted by Name::kHashShift
+ // is convenient; shifting down the length field (to extract the hash code)
+ // automatically discards the hash bit field.
+ static const int kCacheIndexShift = Name::kHashShift;
+
+ private:
+ explicit StubCache(Isolate* isolate);
+
+ // The stub cache has a primary and secondary level. The two levels have
+ // different hashing algorithms in order to avoid simultaneous collisions
+ // in both caches. Unlike a probing strategy (quadratic or otherwise) the
+ // update strategy on updates is fairly clear and simple: Any existing entry
+ // in the primary cache is moved to the secondary cache, and secondary cache
+ // entries are overwritten.
+
+ // Hash algorithm for the primary table. This algorithm is replicated in
+ // assembler for every architecture. Returns an index into the table that
+ // is scaled by 1 << kCacheIndexShift.
+ static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
+ STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
+ // Compute the hash of the name (use entire hash field).
+ DCHECK(name->HasHashCode());
+ uint32_t field = name->hash_field();
+ // Using only the low bits in 64-bit mode is unlikely to increase the
+ // risk of collision even if the heap is spread over an area larger than
+ // 4Gb (and not at all if it isn't).
+ uint32_t map_low32bits =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+ // We always set the in_loop bit to zero when generating the lookup code
+ // so do it here too so the hash codes match.
+ uint32_t iflags =
+ (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+ // Base the offset on a simple combination of name, flags, and map.
+ uint32_t key = (map_low32bits + field) ^ iflags;
+ return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
+ }
+
+ // Hash algorithm for the secondary table. This algorithm is replicated in
+ // assembler for every architecture. Returns an index into the table that
+ // is scaled by 1 << kCacheIndexShift.
+ static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
+ // Use the seed from the primary cache in the secondary cache.
+ uint32_t name_low32bits =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+ // We always set the in_loop bit to zero when generating the lookup code
+ // so do it here too so the hash codes match.
+ uint32_t iflags =
+ (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+ uint32_t key = (seed - name_low32bits) + iflags;
+ return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
+ }
+
+ // Compute the entry for a given offset in exactly the same way as
+ // we do in generated code. We generate an hash code that already
+ // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
+ // of sizeof(Entry). This makes it easier to avoid making mistakes
+ // in the hashed offset computations.
+ static Entry* entry(Entry* table, int offset) {
+ const int multiplier = sizeof(*table) >> Name::kHashShift;
+ return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
+ offset * multiplier);
+ }
+
+ static const int kPrimaryTableBits = 11;
+ static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
+ static const int kSecondaryTableBits = 9;
+ static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+
+ private:
+ Entry primary_[kPrimaryTableSize];
+ Entry secondary_[kSecondaryTableSize];
+ Isolate* isolate_;
+
+ friend class Isolate;
+ friend class SCTableReference;
+
+ DISALLOW_COPY_AND_ASSIGN(StubCache);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_STUB_CACHE_H_
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
new file mode 100644
index 0000000000..cd9196f526
--- /dev/null
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, rax, rbx, rdi, r8};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(rbx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, rbx, rdi, r8};
+ return registers;
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 504482d931..c4d6ecfe7c 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,86 +6,15 @@
#if V8_TARGET_ARCH_X64
-#include "src/arguments.h"
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits
- Register offset) {
- // We need to scale up the pointer by 2 when the offset is scaled by less
- // than the pointer size.
- DCHECK(kPointerSize == kInt64Size
- ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
- : kPointerSizeLog2 == StubCache::kCacheIndexShift);
- ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
-
- DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
- // The offset register holds the entry offset times four (due to masking
- // and shifting optimizations).
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ leap(offset, Operand(offset, offset, times_2, 0));
-
- __ LoadAddress(kScratchRegister, key_offset);
-
- // Check that the key in the entry matches the name.
- // Multiply entry offset by 16 to get the entry address. Since the
- // offset register already holds the entry offset times four, multiply
- // by a further four.
- __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
- __ j(not_equal, &miss);
-
- // Get the map entry from the cache.
- // Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movp(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Get the code entry from the cache.
- __ LoadAddress(kScratchRegister, value_offset);
- __ movp(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, 0));
-
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -119,83 +48,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_equal, miss_label);
Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+ properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- USE(extra2); // The register extra2 is not used on the X64 platform.
- USE(extra3); // The register extra2 is not used on the X64 platform.
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 3 * kPointerSize.
- DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
- // Make sure the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
-
- // Check scratch register is valid, extra and extra2 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(flags));
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(flags));
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
Isolate* isolate = masm->isolate();
@@ -227,10 +86,8 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
}
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
@@ -248,12 +105,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
NamedLoadHandlerCompiler::kInterceptorArgsLength);
@@ -272,7 +125,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ Push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
+ Register arg = values[argc - 1 - i];
DCHECK(!receiver.is(arg));
DCHECK(!scratch_in.is(arg));
__ Push(arg);
@@ -289,16 +142,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ Move(holder, api_holder);
- break;
+ break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
@@ -326,8 +178,8 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ Move(
- api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
+ __ Move(api_function_address, function_address,
+ RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
@@ -338,8 +190,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell =
- JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
__ Move(scratch, cell);
__ Cmp(FieldOperand(scratch, Cell::kValueOffset),
@@ -348,9 +199,115 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver);
+ __ Push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(rax);
+
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+
+ DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ PushReturnAddressFrom(rbx);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -445,13 +402,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(rax));
@@ -468,8 +420,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = transition->instance_size() + (index * kPointerSize);
@@ -484,9 +436,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
- __ RecordWriteField(
- receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -504,9 +455,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
- __ RecordWriteField(
- scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -516,7 +466,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -548,8 +498,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
// Keep track of the current object in register reg. On the first
// iteration, reg is an alias for object_reg, on later iterations,
@@ -584,10 +534,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
+ NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -616,9 +566,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -703,7 +652,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
- __ Push(reg); // holder
+ __ Push(reg); // holder
__ Push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
@@ -711,7 +660,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ PushReturnAddressFrom(scratch4());
// Abi for CallApiGetter
- Register api_function_address = r8;
+ Register api_function_address = ApiGetterDescriptor::function_address();
Address getter_address = v8::ToCData<Address>(callback->getter());
__ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
@@ -727,94 +676,79 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
- }
- }
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ Push(receiver());
- }
- __ Push(holder_reg);
- __ Push(this->name());
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ Pop(this->name());
- __ Pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ Pop(receiver());
- }
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver());
+ }
+ __ Push(holder_reg);
+ __ Push(this->name());
+
+    // Invoke an interceptor. Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see a caller
+    // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
- // Leave the internal frame.
+ __ bind(&interceptor_failed);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ Pop(receiver());
}
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ PopReturnAddressTo(scratch2());
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ PushReturnAddressFrom(scratch2());
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ // Leave the internal frame.
}
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ __ PopReturnAddressTo(scratch2());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+ __ PushReturnAddressFrom(scratch2());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -841,55 +775,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ Push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ movp(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- __ Push(value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(rax);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ PopReturnAddressTo(scratch1());
@@ -908,112 +793,18 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
-
- __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
- // Check map and tail call if there's a match
- __ Cmp(scratch1(), receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ Move(transition_map(),
- transitioned_maps->at(i),
- RelocInfo::EMBEDDED_OBJECT);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
-
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, rax, rbx, rdi, r8 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- DCHECK(rbx.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, rbx, rdi, r8 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ movp(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
}
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- Register result = StoreIC::ValueRegister();
+ Register result = StoreDescriptor::ValueRegister();
__ Move(result, cell);
__ movp(result, FieldOperand(result, PropertyCell::kValueOffset));
@@ -1037,112 +828,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ Cmp(this->name(), name);
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match
- __ Cmp(map_reg, map);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
- }
- }
- DCHECK(number_of_handled_maps > 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- DCHECK(rdx.is(LoadIC::ReceiverRegister()));
- DCHECK(rcx.is(LoadIC::NameRegister()));
- Label slow, miss;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- __ JumpIfNotSmi(rcx, &miss);
- __ SmiToInteger32(rbx, rcx);
- __ movp(rax, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rdx: receiver
- // rcx: key
- // rbx: key as untagged int32
- // rax: elements
- __ LoadFromNumberDictionary(&slow, rax, rcx, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // ----------- S t a t e -------------
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- __ bind(&miss);
- // ----------- S t a t e -------------
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
-
-
-#undef __
-
-} } // namespace v8::internal
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
new file mode 100644
index 0000000000..a5848b6dda
--- /dev/null
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -0,0 +1,137 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
+ !rbx.is(StoreDescriptor::NameRegister()) &&
+ !rbx.is(StoreDescriptor::ValueRegister()));
+
+ __ PopReturnAddressTo(rbx);
+ __ Push(StoreDescriptor::ReceiverRegister());
+ __ Push(StoreDescriptor::NameRegister());
+ __ Push(StoreDescriptor::ValueRegister());
+ __ Push(Smi::FromInt(strict_mode));
+ __ PushReturnAddressFrom(rbx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+
+ __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int i = 0; i < receiver_count; ++i) {
+ // Check map and tail call if there's a match
+ __ Cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ Move(transition_map(), transitioned_maps->at(i),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+ __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ Cmp(this->name(), name);
+ __ j(not_equal, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match
+ __ Cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ }
+ }
+ DCHECK(number_of_handled_maps > 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 69e14135b0..ad79f3042d 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -7,9 +7,9 @@
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -21,8 +21,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -42,13 +41,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// and will jump to the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register r0, Register r1, Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
@@ -64,13 +59,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
// If probing finds an entry in the dictionary, r1 contains the
// index into the dictionary. Check that the value is a normal
@@ -87,9 +77,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movp(result,
- Operand(elements, r1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
+ __ movp(result, Operand(elements, r1, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
}
@@ -100,12 +89,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// call if name is not an internalized string, and will jump to the miss_label
// in that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register scratch0,
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register value, Register scratch0,
Register scratch1) {
// Register use:
//
@@ -121,13 +107,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(
+ masm, miss_label, &done, elements, name, scratch0, scratch1);
// If probing finds an entry in the dictionary, scratch0 contains the
// index into the dictionary. Check that the value is a normal
@@ -139,20 +120,17 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ Test(Operand(elements,
- scratch1,
- times_pointer_size,
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
+ __ Test(Operand(elements, scratch1, times_pointer_size,
kDetailsOffset - kHeapObjectTag),
Smi::FromInt(kTypeAndReadOnlyMask));
__ j(not_zero, miss_label);
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ leap(scratch1, Operand(elements,
- scratch1,
- times_pointer_size,
- kValueOffset - kHeapObjectTag));
+ __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
__ movp(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
@@ -164,10 +142,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
+ Register receiver, Register map,
+ int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
@@ -185,23 +161,19 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ j(below, slow);
// Check bit field.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << interceptor_bit)));
+ __ testb(
+ FieldOperand(map, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ j(not_zero, slow);
}
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch, Register result,
+ Label* not_fast_array, Label* out_of_range) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -236,9 +208,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movp(scratch, FieldOperand(elements,
- index.reg,
- index.scale,
+ __ movp(scratch, FieldOperand(elements, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
@@ -252,12 +222,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
@@ -285,14 +252,13 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
DCHECK(receiver.is(rdx));
DCHECK(key.is(rcx));
@@ -302,20 +268,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, rax, Map::kHasIndexedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
+ Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(rax, &check_number_dictionary);
- GenerateFastArrayLoad(masm,
- receiver,
- key,
- rax,
- rbx,
- rax,
- NULL,
- &slow);
+ GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, NULL, &slow);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
@@ -341,8 +300,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, rax, Map::kHasNamedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
+ &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in key.
@@ -367,8 +326,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(masm->isolate());
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
@@ -390,8 +349,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
// Hit on nth entry.
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
@@ -420,8 +379,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load property array property.
__ bind(&property_array_property);
__ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ movp(rax,
+ FieldOperand(rax, rdi, times_pointer_size, FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -448,16 +407,13 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is on the stack.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register scratch = rbx;
Register result = rax;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -473,62 +429,15 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch = rax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- STATIC_ASSERT(kSmiValueSize <= 32);
- __ JumpUnlessNonNegativeSmi(key, &slow);
-
- // Get the map of the receiver.
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movb(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ andb(scratch, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ PopReturnAddressTo(scratch);
- __ Push(receiver); // receiver
- __ Push(key); // key
- __ PushReturnAddressFrom(scratch);
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register key = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(rdx));
DCHECK(key.is(rcx));
DCHECK(value.is(rax));
@@ -547,10 +456,8 @@ static void KeyedStoreGenerateGenericHelper(
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element
Label holecheck_passed1;
- __ movp(kScratchRegister, FieldOperand(rbx,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize));
+ __ movp(kScratchRegister,
+ FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
__ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &holecheck_passed1);
__ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
@@ -584,8 +491,8 @@ static void KeyedStoreGenerateGenericHelper(
__ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
value);
__ movp(rdx, value); // Preserve the value which is returned.
- __ RecordWriteArray(
- rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ ret(0);
__ bind(fast_double);
@@ -626,24 +533,18 @@ static void KeyedStoreGenerateGenericHelper(
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- rbx,
- rdi,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, rbx, mode, slow);
+ FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ rbx, mode, slow);
__ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
+ rdi, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, rbx, mode, slow);
@@ -655,14 +556,11 @@ static void KeyedStoreGenerateGenericHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ rbx, rdi, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, rbx, mode, slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+ value, rbx, mode, slow);
__ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -674,8 +572,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
Label array, extra, check_if_double_array;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
DCHECK(receiver.is(rdx));
DCHECK(key.is(rcx));
@@ -709,7 +607,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&slow);
__ Integer32ToSmi(key, key);
__ bind(&slow_with_tagged_index);
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
// Never returns to here.
// Extra capacity case: Check if there is extra capacity to
@@ -746,21 +644,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
__ j(below_equal, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
&slow, kDontCheckMap, kIncrementLength);
}
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
+static Operand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
Heap* heap = masm->isolate()->heap();
// Check that the receiver is a JSObject. Because of the elements
@@ -790,10 +684,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ SmiToInteger64(scratch3, key);
- __ movp(scratch2, FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- kHeaderSize));
+ __ movp(scratch2,
+ FieldOperand(scratch1, scratch3, times_pointer_size, kHeaderSize));
__ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ j(equal, unmapped_case);
@@ -802,9 +694,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// map in scratch1).
__ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
__ SmiToInteger64(scratch3, scratch2);
- return FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
+ return FieldOperand(scratch1, scratch3, times_pointer_size,
Context::kHeaderSize);
}
@@ -827,45 +717,17 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
__ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
- return FieldOperand(backing_store,
- scratch,
- times_pointer_size,
+ return FieldOperand(backing_store, scratch, times_pointer_size,
FixedArray::kHeaderSize);
}
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- Label slow, notin;
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, rbx, rax, rdi, &notin, &slow);
- __ movp(rax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movp(rax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, notin;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(rdx));
DCHECK(name.is(rcx));
DCHECK(value.is(rax));
@@ -875,11 +737,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ movp(mapped_location, value);
__ leap(r9, mapped_location);
__ movp(r8, value);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
INLINE_SMI_CHECK);
__ Ret();
__ bind(&notin);
@@ -889,11 +747,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ movp(unmapped_location, value);
__ leap(r9, unmapped_location);
__ movp(r8, value);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
@@ -901,34 +755,17 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(name.is(rcx));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, rbx, rax);
-
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = rax;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ movp(dictionary,
- FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), rbx, rdi,
- rax);
+ __ movp(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), rbx, rdi, rax);
__ ret(0);
// Dictionary load failed, go slow (but don't miss).
@@ -941,9 +778,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static const Register LoadIC_TempRegister() { return rbx; }
-static const Register KeyedLoadIC_TempRegister() {
- return rbx;
-}
+static const Register KeyedLoadIC_TempRegister() { return rbx; }
void LoadIC::GenerateMiss(MacroAssembler* masm) {
@@ -953,8 +788,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->load_miss(), 1);
__ PopReturnAddressTo(LoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
+ __ Push(LoadDescriptor::ReceiverRegister()); // receiver
+ __ Push(LoadDescriptor::NameRegister()); // name
__ PushReturnAddressFrom(LoadIC_TempRegister());
// Perform tail call to the entry.
@@ -968,8 +803,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is on the stack.
__ PopReturnAddressTo(LoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
+ __ Push(LoadDescriptor::ReceiverRegister()); // receiver
+ __ Push(LoadDescriptor::NameRegister()); // name
__ PushReturnAddressFrom(LoadIC_TempRegister());
// Perform tail call to the entry.
@@ -983,8 +818,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_miss(), 1);
__ PopReturnAddressTo(KeyedLoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
+ __ Push(LoadDescriptor::ReceiverRegister()); // receiver
+ __ Push(LoadDescriptor::NameRegister()); // name
__ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
// Perform tail call to the entry.
@@ -994,39 +829,12 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return rdx; }
-const Register LoadIC::NameRegister() { return rcx; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return rax;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return rbx;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return rdx; }
-const Register StoreIC::NameRegister() { return rcx; }
-const Register StoreIC::ValueRegister() { return rax; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return rbx;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is on the stack.
__ PopReturnAddressTo(KeyedLoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
+ __ Push(LoadDescriptor::ReceiverRegister()); // receiver
+ __ Push(LoadDescriptor::NameRegister()); // name
__ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
// Perform tail call to the entry.
@@ -1041,7 +849,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, ReceiverRegister(), NameRegister(), rbx, no_reg);
+ masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), rbx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1049,9 +858,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
@@ -1075,9 +884,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Register dictionary = rbx;
Label miss;
@@ -1094,62 +903,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) &&
- !rbx.is(ValueRegister()));
-
- __ PopReturnAddressTo(rbx);
- __ Push(ReceiverRegister());
- __ Push(NameRegister());
- __ Push(ValueRegister());
- __ Push(Smi::FromInt(strict_mode));
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) &&
- !rbx.is(ValueRegister()));
-
- __ PopReturnAddressTo(rbx);
- __ Push(ReceiverRegister());
- __ Push(NameRegister());
- __ Push(ValueRegister());
- __ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
@@ -1212,8 +965,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
+ test_instruction_address, delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
@@ -1221,17 +974,17 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// reverse operation of that.
Address jmp_address = test_instruction_address - delta;
DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc =
+ (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
new file mode 100644
index 0000000000..a54ddcaf96
--- /dev/null
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -0,0 +1,153 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register receiver, Register name,
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits
+ Register offset) {
+ // We need to scale up the pointer by 2 when the offset is scaled by less
+ // than the pointer size.
+ DCHECK(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
+ : kPointerSizeLog2 == StubCache::kCacheIndexShift);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
+
+ DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
+ // The offset register holds the entry offset times four (due to masking
+ // and shifting optimizations).
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ leap(offset, Operand(offset, offset, times_2, 0));
+
+ __ LoadAddress(kScratchRegister, key_offset);
+
+ // Check that the key in the entry matches the name.
+ // Multiply entry offset by 16 to get the entry address. Since the
+ // offset register already holds the entry offset times four, multiply
+ // by a further four.
+ __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
+ __ j(not_equal, &miss);
+
+ // Get the map entry from the cache.
+ // Use key_offset + kPointerSize * 2, rather than loading map_offset.
+ __ movp(kScratchRegister,
+ Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Get the code entry from the cache.
+ __ LoadAddress(kScratchRegister, value_offset);
+ __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
+
+ // Check that the flags match what we're looking for.
+ __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ cmpl(offset, Immediate(flags));
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ if (leave_frame) __ leave();
+
+ // Jump to the first instruction in the code stub.
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(kScratchRegister);
+
+ __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+ USE(extra); // The register extra is not used on the X64 platform.
+ USE(extra2); // The register extra2 is not used on the X64 platform.
+ USE(extra3); // The register extra2 is not used on the X64 platform.
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 3 * kPointerSize.
+ DCHECK(sizeof(Entry) == 3 * kPointerSize);
+
+ // Make sure the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+
+ // Check scratch register is valid, extra and extra2 are unused.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(extra2.is(no_reg));
+ DCHECK(extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
+ // Use only the low 32 bits of the map pointer.
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xorp(scratch, Immediate(flags));
+ // We mask out the last two bits because they are not part of the hash and
+ // they are always 01 for maps. Also in the two 'and' instructions below.
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+ scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
+ __ subl(scratch, name);
+ __ addl(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+ scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x87/OWNERS b/deps/v8/src/ic/x87/OWNERS
new file mode 100644
index 0000000000..dd9998b261
--- /dev/null
+++ b/deps/v8/src/ic/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
new file mode 100644
index 0000000000..9456ec899c
--- /dev/null
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(ebx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, ebx, edi, no_reg};
+ return registers;
+}
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 0fc450a56f..e706998c38 100644
--- a/deps/v8/src/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -6,9 +6,9 @@
#if V8_TARGET_ARCH_X87
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
namespace v8 {
namespace internal {
@@ -16,101 +16,34 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register receiver,
- // Number of the cache entry pointer-size scaled.
- Register offset,
- Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- __ bind(&miss);
- } else {
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
}
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
+ __ ret(0);
}
@@ -147,89 +80,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_equal, miss_label);
Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+ properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Assert the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Assert that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- __ sub(offset, name);
- __ add(offset, Immediate(flags));
- __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
- // Probe the secondary table.
- ProbeTable(
- isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Get the global function with the given index.
@@ -259,40 +116,6 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
}
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
// Generate call to api function.
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should will be removed
@@ -307,7 +130,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
+ Register arg = values[argc - 1 - i];
DCHECK(!receiver.is(arg));
DCHECK(!scratch_in.is(arg));
__ push(arg);
@@ -325,16 +148,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ LoadHeapObject(holder, api_holder);
- break;
+ break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
@@ -376,8 +198,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell =
- JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
if (masm->serializer_enabled()) {
@@ -391,9 +212,107 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
}
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ __ push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Immediate(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
@@ -427,7 +346,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
HeapType* field_type = descriptors->GetFieldType(descriptor);
@@ -491,12 +410,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(eax));
@@ -512,8 +427,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
@@ -529,12 +444,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -552,12 +463,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- EMIT_REMEMBERED_SET,
- smi_check);
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -567,7 +474,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
}
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
Register value_reg,
Label* miss_label) {
DCHECK(lookup->representation().IsHeapObject());
@@ -599,8 +506,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
@@ -632,10 +539,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
+ NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -660,9 +567,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
}
if (load_prototype_from_map) {
@@ -762,7 +668,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(scratch3()); // Restore return address.
// Abi for CallApiGetter
- Register getter_address = edx;
+ Register getter_address = ApiGetterDescriptor::function_address();
Address function_address = v8::ToCData<Address>(callback->getter());
__ mov(getter_address, Immediate(function_address));
@@ -778,102 +684,86 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
}
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name) {
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- compile_followup_inline =
- callback->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
- type());
- }
- }
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *holder() != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- // Clobber registers when generating debug-code to provoke errors.
- __ bind(&interceptor_failed);
- if (FLAG_debug_code) {
- __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
- }
+ if (must_preserve_receiver_reg) {
+ __ push(receiver());
+ }
+ __ push(holder_reg);
+ __ push(this->name());
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
+ // Clobber registers when generating debug-code to provoke errors.
+ __ bind(&interceptor_failed);
+ if (FLAG_debug_code) {
+ __ mov(receiver(), Immediate(bit_cast<int32_t>(kZapValue)));
+ __ mov(holder_reg, Immediate(bit_cast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
+ }
- // Leave the internal frame.
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
}
- GenerateLoadPostInterceptor(holder_reg, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
- isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ // Leave the internal frame.
}
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ // Call the runtime system to load the interceptor.
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+ __ push(scratch2()); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -900,55 +790,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- __ push(value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ pop(scratch1()); // remove the return address
@@ -967,100 +808,18 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(scratch1(), receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, ebx, eax, edi, no_reg };
- return registers;
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
}
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(ebx.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, ebx, edi, no_reg };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- Register result = StoreIC::ValueRegister();
+ Register result = StoreDescriptor::ValueRegister();
if (masm()->serializer_enabled()) {
__ mov(result, Immediate(cell));
__ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
@@ -1089,113 +848,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- __ cmp(map_reg, map);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- DCHECK(edx.is(LoadIC::ReceiverRegister()));
- DCHECK(ecx.is(LoadIC::NameRegister()));
- Label slow, miss;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow);
- __ pop(edx);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- __ bind(&miss);
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
-
-
-#undef __
-
-} } // namespace v8::internal
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
new file mode 100644
index 0000000000..20b47e726e
--- /dev/null
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -0,0 +1,128 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
+ !ebx.is(StoreDescriptor::NameRegister()) &&
+ !ebx.is(StoreDescriptor::ValueRegister()));
+ __ pop(ebx);
+ __ push(StoreDescriptor::ReceiverRegister());
+ __ push(StoreDescriptor::NameRegister());
+ __ push(StoreDescriptor::ValueRegister());
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+ __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ j(equal, handlers->at(current));
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ __ cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i));
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 4f7d810133..9c090c56a5 100644
--- a/deps/v8/src/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -7,9 +7,9 @@
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -21,8 +21,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -42,13 +41,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// name is not internalized, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register r0, Register r1, Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
@@ -66,13 +61,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -99,13 +89,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// call if name is not internalized, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register r0,
- Register r1) {
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register value, Register r0, Register r1) {
// Register use:
//
// elements - holds the property dictionary on entry and is clobbered.
@@ -121,13 +107,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -139,7 +120,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(kTypeAndReadOnlyMask));
__ j(not_zero, miss_label);
@@ -151,17 +133,15 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ mov(r1, value);
- __ RecordWrite(elements, r0, r1);
+ __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
+ Register receiver, Register map,
+ int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
@@ -190,12 +170,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register scratch,
- Register result,
- Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register scratch,
+ Register result, Label* not_fast_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged.
@@ -233,12 +210,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
@@ -266,13 +240,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
+static Operand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Label* unmapped_case, Label* slow_case) {
Heap* heap = masm->isolate()->heap();
Factory* factory = masm->isolate()->factory();
@@ -302,10 +272,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2, FieldOperand(scratch1,
- key,
- times_half_pointer_size,
- kHeaderSize));
+ __ mov(scratch2,
+ FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
__ cmp(scratch2, factory->the_hole_value());
__ j(equal, unmapped_case);
@@ -314,9 +282,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// map in scratch1).
const int kContextOffset = FixedArray::kHeaderSize;
__ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1,
- scratch2,
- times_half_pointer_size,
+ return FieldOperand(scratch1, scratch2, times_half_pointer_size,
Context::kHeaderSize);
}
@@ -336,9 +302,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, scratch);
__ j(greater_equal, slow_case);
- return FieldOperand(backing_store,
- key,
- times_half_pointer_size,
+ return FieldOperand(backing_store, key, times_half_pointer_size,
FixedArray::kHeaderSize);
}
@@ -348,8 +312,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
@@ -359,8 +323,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, eax, Map::kHasIndexedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
+ Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
@@ -379,9 +343,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a number dictionary.
// ebx: untagged index
// eax: elements
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow,
+ __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
DONT_DO_SMI_CHECK);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
@@ -404,8 +366,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, eax, Map::kHasNamedInterceptor, &slow);
+ GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
+ &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -492,8 +454,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load property array property.
__ bind(&property_array_property);
__ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ mov(eax,
+ FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -520,17 +482,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// Return address is on the stack.
Label miss;
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
Register scratch = ebx;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -546,92 +505,22 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch = eax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Get the map of the receiver.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ and_(scratch, Immediate(kSlowCaseBitFieldMask));
- __ cmp(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(scratch);
- __ push(receiver); // receiver
- __ push(key); // key
- __ push(scratch); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = ExternalReference(
- IC_Utility(kLoadElementWithInterceptor), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, ebx, eax, &notin, &slow);
- __ mov(eax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(eax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// Return address is on the stack.
Label slow, notin;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(edx));
DCHECK(name.is(ecx));
DCHECK(value.is(eax));
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, &notin,
- &slow);
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, name, ebx, edi, &notin, &slow);
__ mov(mapped_location, value);
__ lea(ecx, mapped_location);
__ mov(edx, value);
- __ RecordWrite(ebx, ecx, edx);
+ __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx.
@@ -640,7 +529,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
__ mov(unmapped_location, value);
__ lea(edi, unmapped_location);
__ mov(edx, value);
- __ RecordWrite(ebx, edi, edx);
+ __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm);
@@ -648,18 +537,14 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register key = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
DCHECK(value.is(eax));
@@ -713,8 +598,8 @@ static void KeyedStoreGenerateGenericHelper(
__ mov(FixedArrayElementOperand(ebx, key), value);
// Update write barrier for the elements array address.
__ mov(edx, value); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, key, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ ret(0);
__ bind(fast_double);
@@ -750,32 +635,24 @@ static void KeyedStoreGenerateGenericHelper(
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ CheckMap(value,
- masm->isolate()->factory()->heap_number_map(),
- &non_double_value,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
+ &non_double_value, DONT_DO_SMI_CHECK);
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- ebx,
- edi,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, ebx, mode, slow);
+ FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
+ edi, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, ebx, mode, slow);
@@ -787,14 +664,11 @@ static void KeyedStoreGenerateGenericHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ ebx, edi, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, ebx, mode, slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+ value, ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -806,8 +680,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
Label array, extra, check_if_double_array;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
@@ -838,7 +712,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
- GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -877,42 +751,24 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
&slow, kDontCheckMap, kIncrementLength);
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(name.is(ecx));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, ebx, eax);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = eax;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
- __ mov(dictionary,
- FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), edi, ebx,
- eax);
+ __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), edi, ebx, eax);
__ ret(0);
// Dictionary load failed, go slow (but don't miss).
@@ -922,8 +778,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
DCHECK(!ebx.is(receiver) && !ebx.is(name));
__ pop(ebx);
@@ -968,33 +824,6 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return edx; }
-const Register LoadIC::NameRegister() { return ecx; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return eax;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return ebx;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return edx; }
-const Register StoreIC::NameRegister() { return ecx; }
-const Register StoreIC::ValueRegister() { return eax; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return ebx;
-}
-
-
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
LoadIC_PushArgs(masm);
@@ -1009,8 +838,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, ReceiverRegister(), NameRegister(),
- ebx, no_reg);
+ masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1018,9 +847,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
@@ -1045,9 +874,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label restore_miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Register dictionary = ebx;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
@@ -1070,40 +899,6 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
- !ebx.is(ValueRegister()));
- __ pop(ebx);
- __ push(ReceiverRegister());
- __ push(NameRegister());
- __ push(ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
- !ebx.is(ValueRegister()));
- __ pop(ebx);
- __ push(ReceiverRegister());
- __ push(NameRegister());
- __ push(ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
@@ -1115,26 +910,6 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
#undef __
@@ -1186,8 +961,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
+ test_instruction_address, delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
@@ -1195,17 +970,17 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// reverse operation of that.
Address jmp_address = test_instruction_address - delta;
DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc =
+ (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
new file mode 100644
index 0000000000..0291ef3d82
--- /dev/null
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -0,0 +1,189 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register name, Register receiver,
+ // Number of the cache entry pointer-size scaled.
+ Register offset, Register extra) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
+
+ if (extra.is_valid()) {
+ // Get the code entry from the cache.
+ __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ if (leave_frame) __ leave();
+
+ // Jump to the first instruction in the code stub.
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
+
+ __ bind(&miss);
+ } else {
+ // Save the offset on the stack.
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Restore offset register.
+ __ mov(offset, Operand(esp, 0));
+
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ if (leave_frame) __ leave();
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
+
+ // Pop at miss.
+ __ bind(&miss);
+ __ pop(offset);
+ }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Label miss;
+
+ // Assert that code is valid. The multiplying code relies on the entry size
+ // being 12.
+ DCHECK(sizeof(Entry) == 12);
+
+ // Assert the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Assert that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+
+ // Assert scratch and extra registers are valid, and extra2/3 are unused.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(extra2.is(no_reg));
+ DCHECK(extra3.is(no_reg));
+
+ Register offset = scratch;
+ scratch = no_reg;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ // We mask out the last two bits because they are not part of the hash and
+ // they are always 01 for maps. Also in the two 'and' instructions below.
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+ // ProbeTable expects the offset to be pointer scaled, which it is, because
+ // the heap object tag size is 2 and the pointer size log 2 is also 2.
+ DCHECK(kCacheIndexShift == kPointerSizeLog2);
+
+ // Probe the primary table.
+ ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
+ offset, extra);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+ __ sub(offset, name);
+ __ add(offset, Immediate(flags));
+ __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
+
+ // Probe the secondary table.
+ ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
+ offset, extra);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
new file mode 100644
index 0000000000..62d710581f
--- /dev/null
+++ b/deps/v8/src/interface-descriptors.cc
@@ -0,0 +1,143 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+void CallInterfaceDescriptorData::Initialize(
+ int register_parameter_count, Register* registers,
+ Representation* register_param_representations,
+ PlatformInterfaceDescriptor* platform_descriptor) {
+ platform_specific_descriptor_ = platform_descriptor;
+ register_param_count_ = register_parameter_count;
+
+ // An interface descriptor must have a context register.
+ DCHECK(register_parameter_count > 0 &&
+ registers[0].is(CallInterfaceDescriptor::ContextRegister()));
+
+ // InterfaceDescriptor owns a copy of the registers array.
+ register_params_.Reset(NewArray<Register>(register_parameter_count));
+ for (int i = 0; i < register_parameter_count; i++) {
+ register_params_[i] = registers[i];
+ }
+
+ // If a representations array is specified, then the descriptor owns that as
+ // well.
+ if (register_param_representations != NULL) {
+ register_param_representations_.Reset(
+ NewArray<Representation>(register_parameter_count));
+ for (int i = 0; i < register_parameter_count; i++) {
+ // If there is a context register, the representation must be tagged.
+ DCHECK(
+ i != 0 ||
+ register_param_representations[i].Equals(Representation::Tagged()));
+ register_param_representations_[i] = register_param_representations[i];
+ }
+ }
+}
+
+
+const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) {
+ CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
+ size_t index = data_ - start;
+ DCHECK(index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
+ CallDescriptors::Key key = static_cast<CallDescriptors::Key>(index);
+ switch (key) {
+#define DEF_CASE(NAME) \
+ case CallDescriptors::NAME: \
+ return #NAME " Descriptor";
+ INTERFACE_DESCRIPTOR_LIST(DEF_CASE)
+#undef DEF_CASE
+ case CallDescriptors::NUMBER_OF_DESCRIPTORS:
+ break;
+ }
+ return "";
+}
+
+
+void LoadDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), ReceiverRegister(),
+ NameRegister()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+ ValueRegister()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ElementTransitionAndStoreDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), ValueRegister(), MapRegister(),
+ NameRegister(), ReceiverRegister()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InstanceofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), left(), right()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void MathPowTaggedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), exponent()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void MathPowIntegerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), exponent()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void VectorLoadICTrampolineDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+ SlotRegister()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void VectorLoadICDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+ SlotRegister(), VectorRegister()};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiGetterDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), function_address()};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::External()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentsAccessReadDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister(), index(), parameter_count()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ContextOnlyDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {ContextRegister()};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
new file mode 100644
index 0000000000..b773c916ec
--- /dev/null
+++ b/deps/v8/src/interface-descriptors.h
@@ -0,0 +1,486 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CALL_INTERFACE_DESCRIPTOR_H_
+#define V8_CALL_INTERFACE_DESCRIPTOR_H_
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor;
+
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Load) \
+ V(Store) \
+ V(ElementTransitionAndStore) \
+ V(Instanceof) \
+ V(VectorLoadICTrampoline) \
+ V(VectorLoadIC) \
+ V(FastNewClosure) \
+ V(FastNewContext) \
+ V(ToNumber) \
+ V(NumberToString) \
+ V(FastCloneShallowArray) \
+ V(FastCloneShallowObject) \
+ V(CreateAllocationSite) \
+ V(CallFunction) \
+ V(CallFunctionWithFeedback) \
+ V(CallConstruct) \
+ V(RegExpConstructResult) \
+ V(TransitionElementsKind) \
+ V(ArrayConstructorConstantArgCount) \
+ V(ArrayConstructor) \
+ V(InternalArrayConstructorConstantArgCount) \
+ V(InternalArrayConstructor) \
+ V(CompareNil) \
+ V(ToBoolean) \
+ V(BinaryOp) \
+ V(BinaryOpWithAllocationSite) \
+ V(StringAdd) \
+ V(Keyed) \
+ V(Named) \
+ V(CallHandler) \
+ V(ArgumentAdaptor) \
+ V(ApiGetter) \
+ V(ApiFunction) \
+ V(ArgumentsAccessRead) \
+ V(StoreArrayLiteralElement) \
+ V(MathPowTagged) \
+ V(MathPowInteger) \
+ V(ContextOnly)
+
+
+class CallInterfaceDescriptorData {
+ public:
+ CallInterfaceDescriptorData() : register_param_count_(-1) {}
+
+ // A copy of the passed in registers and param_representations is made
+ // and owned by the CallInterfaceDescriptorData.
+
+ // TODO(mvstanton): Instead of taking parallel arrays register and
+ // param_representations, how about a struct that puts the representation
+ // and register side by side (eg, RegRep(r1, Representation::Tagged()).
+ // The same should go for the CodeStubDescriptor class.
+ void Initialize(int register_parameter_count, Register* registers,
+ Representation* param_representations,
+ PlatformInterfaceDescriptor* platform_descriptor = NULL);
+
+ bool IsInitialized() const { return register_param_count_ >= 0; }
+
+ int register_param_count() const { return register_param_count_; }
+ Register register_param(int index) const { return register_params_[index]; }
+ Register* register_params() const { return register_params_.get(); }
+ Representation register_param_representation(int index) const {
+ return register_param_representations_[index];
+ }
+ Representation* register_param_representations() const {
+ return register_param_representations_.get();
+ }
+ PlatformInterfaceDescriptor* platform_specific_descriptor() const {
+ return platform_specific_descriptor_;
+ }
+
+ private:
+ int register_param_count_;
+
+ // The Register params are allocated dynamically by the
+ // InterfaceDescriptor, and freed on destruction. This is because static
+ // arrays of Registers cause creation of runtime static initializers
+ // which we don't want.
+ SmartArrayPointer<Register> register_params_;
+ // Specifies Representations for the stub's parameter. Points to an array of
+ // Representations of the same length of the numbers of parameters to the
+ // stub, or if NULL (the default value), Representation of each parameter
+ // assumed to be Tagged().
+ SmartArrayPointer<Representation> register_param_representations_;
+
+ PlatformInterfaceDescriptor* platform_specific_descriptor_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallInterfaceDescriptorData);
+};
+
+
+class CallDescriptors {
+ public:
+ enum Key {
+#define DEF_ENUM(name) name,
+ INTERFACE_DESCRIPTOR_LIST(DEF_ENUM)
+#undef DEF_ENUM
+ NUMBER_OF_DESCRIPTORS
+ };
+};
+
+
+class CallInterfaceDescriptor {
+ public:
+ CallInterfaceDescriptor() : data_(NULL) {}
+
+ CallInterfaceDescriptor(Isolate* isolate, CallDescriptors::Key key)
+ : data_(isolate->call_descriptor_data(key)) {}
+
+ int GetEnvironmentLength() const { return data()->register_param_count(); }
+
+ int GetRegisterParameterCount() const {
+ return data()->register_param_count();
+ }
+
+ Register GetParameterRegister(int index) const {
+ return data()->register_param(index);
+ }
+
+ Representation GetParameterRepresentation(int index) const {
+ DCHECK(index < data()->register_param_count());
+ if (data()->register_param_representations() == NULL) {
+ return Representation::Tagged();
+ }
+
+ return data()->register_param_representation(index);
+ }
+
+ // "Environment" versions of parameter functions. The first register
+ // parameter (context) is not included.
+ int GetEnvironmentParameterCount() const {
+ return GetEnvironmentLength() - 1;
+ }
+
+ Register GetEnvironmentParameterRegister(int index) const {
+ return GetParameterRegister(index + 1);
+ }
+
+ Representation GetEnvironmentParameterRepresentation(int index) const {
+ return GetParameterRepresentation(index + 1);
+ }
+
+ // Some platforms have extra information to associate with the descriptor.
+ PlatformInterfaceDescriptor* platform_specific_descriptor() const {
+ return data()->platform_specific_descriptor();
+ }
+
+ static const Register ContextRegister();
+
+ const char* DebugName(Isolate* isolate);
+
+ protected:
+ const CallInterfaceDescriptorData* data() const { return data_; }
+
+ private:
+ const CallInterfaceDescriptorData* data_;
+};
+
+
+#define DECLARE_DESCRIPTOR(name, base) \
+ explicit name(Isolate* isolate) : base(isolate, key()) { \
+ if (!data()->IsInitialized()) \
+ Initialize(isolate->call_descriptor_data(key())); \
+ } \
+ \
+ protected: \
+ void Initialize(CallInterfaceDescriptorData* data); \
+ name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+ \
+ public: \
+ static inline CallDescriptors::Key key();
+
+
+// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
+class LoadDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(LoadDescriptor, CallInterfaceDescriptor)
+
+ enum ParameterIndices { kReceiverIndex, kNameIndex };
+ static const Register ReceiverRegister();
+ static const Register NameRegister();
+};
+
+
+class StoreDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(StoreDescriptor, CallInterfaceDescriptor)
+
+ enum ParameterIndices {
+ kReceiverIndex,
+ kNameIndex,
+ kValueIndex,
+ kParameterCount
+ };
+ static const Register ReceiverRegister();
+ static const Register NameRegister();
+ static const Register ValueRegister();
+};
+
+
+class ElementTransitionAndStoreDescriptor : public StoreDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ElementTransitionAndStoreDescriptor, StoreDescriptor)
+
+ static const Register MapRegister();
+};
+
+
+class InstanceofDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InstanceofDescriptor, CallInterfaceDescriptor)
+
+ enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
+ static const Register left();
+ static const Register right();
+};
+
+
+class VectorLoadICTrampolineDescriptor : public LoadDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(VectorLoadICTrampolineDescriptor, LoadDescriptor)
+
+ enum ParameterIndices { kReceiverIndex, kNameIndex, kSlotIndex };
+
+ static const Register SlotRegister();
+};
+
+
+class VectorLoadICDescriptor : public VectorLoadICTrampolineDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(VectorLoadICDescriptor, VectorLoadICTrampolineDescriptor)
+
+ enum ParameterIndices {
+ kReceiverIndex,
+ kNameIndex,
+ kSlotIndex,
+ kVectorIndex
+ };
+
+ static const Register VectorRegister();
+};
+
+
+class FastNewClosureDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
+};
+
+
+class FastNewContextDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastNewContextDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ToNumberDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ToNumberDescriptor, CallInterfaceDescriptor)
+};
+
+
+class NumberToStringDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(NumberToStringDescriptor, CallInterfaceDescriptor)
+};
+
+
+class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastCloneShallowArrayDescriptor, CallInterfaceDescriptor)
+};
+
+
+class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastCloneShallowObjectDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CreateAllocationSiteDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CallFunctionDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CallFunctionWithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class CallConstructDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
+};
+
+
+class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(RegExpConstructResultDescriptor, CallInterfaceDescriptor)
+};
+
+
+class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ArrayConstructorConstantArgCountDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ArrayConstructorConstantArgCountDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ArrayConstructorDescriptor, CallInterfaceDescriptor)
+};
+
+
+class InternalArrayConstructorConstantArgCountDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InternalArrayConstructorConstantArgCountDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class InternalArrayConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InternalArrayConstructorDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class CompareNilDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CompareNilDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ToBooleanDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ToBooleanDescriptor, CallInterfaceDescriptor)
+};
+
+
+class BinaryOpDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
+};
+
+
+class BinaryOpWithAllocationSiteDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(BinaryOpWithAllocationSiteDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class StringAddDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
+};
+
+
+class KeyedDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(KeyedDescriptor, CallInterfaceDescriptor)
+};
+
+
+class NamedDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(NamedDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CallHandlerDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CallHandlerDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ArgumentAdaptorDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ApiFunctionDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ApiFunctionDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ApiGetterDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
+
+ static const Register function_address();
+};
+
+
+class ArgumentsAccessReadDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ArgumentsAccessReadDescriptor, CallInterfaceDescriptor)
+
+ static const Register index();
+ static const Register parameter_count();
+};
+
+
+class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
+
+ static const Register exponent();
+};
+
+
+class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(MathPowIntegerDescriptor, CallInterfaceDescriptor)
+
+ static const Register exponent();
+};
+
+
+class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
+};
+
+#undef DECLARE_DESCRIPTOR
+
+
+// We define the association between CallDescriptors::Key and the specialized
+// descriptor here to reduce boilerplate and mistakes.
+#define DEF_KEY(name) \
+ CallDescriptors::Key name##Descriptor::key() { return CallDescriptors::name; }
+INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
+#undef DEF_KEY
+}
+} // namespace v8::internal
+
+
+#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/interface-descriptors-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/interface-descriptors-arm.h"
+#endif
+
+#endif // V8_CALL_INTERFACE_DESCRIPTOR_H_
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 7f51d5e410..2aedfb48b1 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -584,7 +584,7 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
const byte* code_base = code_array->GetDataStartAddress();
uc16 previous_char = '\n';
String::FlatContent subject_content = subject->GetFlatContent();
- if (subject_content.IsAscii()) {
+ if (subject_content.IsOneByte()) {
Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
return RawMatch(isolate,
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 215296d735..7d1f835aa1 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -8,7 +8,9 @@
#include "src/ast.h"
#include "src/base/platform/platform.h"
+#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
@@ -19,6 +21,7 @@
#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
#include "src/hydrogen.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/lithium-allocator.h"
#include "src/log.h"
@@ -30,7 +33,6 @@
#include "src/scopeinfo.h"
#include "src/serialize.h"
#include "src/simulator.h"
-#include "src/stub-cache.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -47,7 +49,6 @@ int ThreadId::AllocateThreadId() {
int ThreadId::GetCurrentThreadId() {
- Isolate::EnsureInitialized();
int thread_id = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
if (thread_id == 0) {
thread_id = AllocateThreadId();
@@ -79,6 +80,7 @@ void ThreadLocalTop::InitializeInternal() {
save_context_ = NULL;
catcher_ = NULL;
top_lookup_result_ = NULL;
+ promise_on_stack_ = NULL;
// These members are re-initialized later after deserialization
// is complete.
@@ -100,23 +102,25 @@ void ThreadLocalTop::Initialize() {
}
+void ThreadLocalTop::Free() {
+ // Match unmatched PopPromise calls.
+ while (promise_on_stack_) isolate_->PopPromise();
+}
+
+
base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::thread_id_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-#ifdef DEBUG
-base::Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
-#endif // DEBUG
-base::LazyMutex Isolate::process_wide_mutex_ = LAZY_MUTEX_INITIALIZER;
+base::LazyMutex Isolate::thread_data_table_mutex_ = LAZY_MUTEX_INITIALIZER;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
base::Atomic32 Isolate::isolate_counter_ = 0;
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
- EnsureInitialized();
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
per_thread = new PerIsolateThreadData(this, thread_id);
@@ -136,28 +140,22 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
ThreadId thread_id) {
- EnsureInitialized();
PerIsolateThreadData* per_thread = NULL;
{
- base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
}
-void Isolate::EnsureInitialized() {
- base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer());
- if (thread_data_table_ == NULL) {
- isolate_key_ = base::Thread::CreateThreadLocalKey();
- thread_id_key_ = base::Thread::CreateThreadLocalKey();
- per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
-#ifdef DEBUG
- PerThreadAssertScopeBase::thread_local_key =
- base::Thread::CreateThreadLocalKey();
-#endif // DEBUG
- thread_data_table_ = new Isolate::ThreadDataTable();
- }
+void Isolate::InitializeOncePerProcess() {
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
+ CHECK(thread_data_table_ == NULL);
+ isolate_key_ = base::Thread::CreateThreadLocalKey();
+ thread_id_key_ = base::Thread::CreateThreadLocalKey();
+ per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
+ thread_data_table_ = new Isolate::ThreadDataTable();
}
@@ -183,16 +181,16 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
v->VisitPointer(&thread->pending_exception_);
v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
- v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+ v->VisitPointer(bit_cast<Object**>(&(thread->pending_message_script_)));
+ v->VisitPointer(bit_cast<Object**>(&(thread->context_)));
v->VisitPointer(&thread->scheduled_exception_);
for (v8::TryCatch* block = thread->try_catch_handler();
block != NULL;
block = block->next_) {
- v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_obj_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_script_)));
+ v->VisitPointer(bit_cast<Object**>(&(block->exception_)));
+ v->VisitPointer(bit_cast<Object**>(&(block->message_obj_)));
+ v->VisitPointer(bit_cast<Object**>(&(block->message_script_)));
}
// Iterate over pointers on native execution stack.
@@ -445,22 +443,22 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
Handle<String> column_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("column"));
Handle<String> line_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("lineNumber"));
Handle<String> script_id_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptId"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptId"));
Handle<String> script_name_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptName"));
Handle<String> script_name_or_source_url_key =
factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
+ STATIC_CHAR_VECTOR("scriptNameOrSourceURL"));
Handle<String> function_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("functionName"));
Handle<String> eval_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isEval"));
Handle<String> constructor_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isConstructor"));
StackTraceFrameIterator it(this);
int frames_seen = 0;
@@ -637,7 +635,10 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver,
v8::AccessType type) {
if (!thread_local_top()->failed_access_check_callback_) {
Handle<String> message = factory()->InternalizeUtf8String("no access");
- ScheduleThrow(*factory()->NewTypeError(message));
+ Handle<Object> error;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ this, error, factory()->NewTypeError(message), /* void */);
+ ScheduleThrow(*error);
return;
}
@@ -854,12 +855,6 @@ Object* Isolate::ThrowIllegalOperation() {
}
-Object* Isolate::ThrowInvalidStringLength() {
- return Throw(*factory()->NewRangeError(
- "invalid_string_length", HandleVector<Object>(NULL, 0)));
-}
-
-
void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
@@ -987,7 +982,7 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
if (!obj->IsJSObject()) return false;
Handle<String> error_key =
- factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error"));
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("$Error"));
Handle<Object> error_constructor = Object::GetProperty(
js_builtins_object(), error_key).ToHandleChecked();
@@ -1018,9 +1013,9 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
bool report_exception = catchable_by_javascript && should_report_exception;
bool try_catch_needs_message =
- can_be_caught_externally && try_catch_handler()->capture_message_ &&
- !thread_local_top()->rethrowing_message_;
+ can_be_caught_externally && try_catch_handler()->capture_message_;
bool bootstrapping = bootstrapper()->IsActive();
+ bool rethrowing_message = thread_local_top()->rethrowing_message_;
thread_local_top()->rethrowing_message_ = false;
@@ -1030,7 +1025,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
}
// Generate the message if required.
- if (report_exception || try_catch_needs_message) {
+ if (!rethrowing_message && (report_exception || try_catch_needs_message)) {
MessageLocation potential_computed_location;
if (location == NULL) {
// If no location was specified we use a computed one instead.
@@ -1049,8 +1044,8 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// Look up as own property. If the lookup fails, the exception is
// probably not a valid Error object. In that case, we fall through
// and capture the stack trace at this throw site.
- LookupIterator lookup(
- exception_handle, key, LookupIterator::CHECK_OWN_REAL);
+ LookupIterator lookup(exception_handle, key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> stack_trace_property;
if (Object::GetProperty(&lookup).ToHandle(&stack_trace_property) &&
stack_trace_property->IsJSArray()) {
@@ -1074,7 +1069,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
Execution::ToDetailString(this, exception_arg);
if (!maybe_exception.ToHandle(&exception_arg)) {
exception_arg = factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("exception"));
+ STATIC_CHAR_VECTOR("exception"));
}
}
Handle<Object> message_obj = MessageHandler::MakeMessageObject(
@@ -1289,6 +1284,48 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
}
+void Isolate::PushPromise(Handle<JSObject> promise) {
+ ThreadLocalTop* tltop = thread_local_top();
+ PromiseOnStack* prev = tltop->promise_on_stack_;
+ StackHandler* handler = StackHandler::FromAddress(Isolate::handler(tltop));
+ Handle<JSObject> global_handle =
+ Handle<JSObject>::cast(global_handles()->Create(*promise));
+ tltop->promise_on_stack_ = new PromiseOnStack(handler, global_handle, prev);
+}
+
+
+void Isolate::PopPromise() {
+ ThreadLocalTop* tltop = thread_local_top();
+ if (tltop->promise_on_stack_ == NULL) return;
+ PromiseOnStack* prev = tltop->promise_on_stack_->prev();
+ Handle<Object> global_handle = tltop->promise_on_stack_->promise();
+ delete tltop->promise_on_stack_;
+ tltop->promise_on_stack_ = prev;
+ global_handles()->Destroy(global_handle.location());
+}
+
+
+Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
+ Handle<Object> undefined = factory()->undefined_value();
+ ThreadLocalTop* tltop = thread_local_top();
+ if (tltop->promise_on_stack_ == NULL) return undefined;
+ StackHandler* promise_try = tltop->promise_on_stack_->handler();
+ // Find the top-most try-catch handler.
+ StackHandler* handler = StackHandler::FromAddress(Isolate::handler(tltop));
+ do {
+ if (handler == promise_try) {
+ // Mark the pushed try-catch handler to prevent a later duplicate event
+ // triggered with the following reject.
+ return tltop->promise_on_stack_->promise();
+ }
+ handler = handler->next();
+ // Throwing inside a Promise can be intercepted by an inner try-catch, so
+ // we stop at the first try-catch handler.
+ } while (handler != NULL && !handler->is_catch());
+ return undefined;
+}
+
+
void Isolate::SetCaptureStackTraceForUncaughtExceptions(
bool capture,
int frame_limit,
@@ -1457,8 +1494,7 @@ Isolate::Isolate()
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
- code_stub_interface_descriptors_(NULL),
- call_descriptors_(NULL),
+ call_descriptor_data_(NULL),
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
@@ -1474,7 +1510,12 @@ Isolate::Isolate()
num_sweeper_threads_(0),
stress_deopt_count_(0),
next_optimization_id_(0),
- use_counter_callback_(NULL) {
+ use_counter_callback_(NULL),
+ basic_block_profiler_(NULL) {
+ {
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
+ CHECK(thread_data_table_);
+ }
id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1525,7 +1566,7 @@ void Isolate::TearDown() {
Deinit();
{
- base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
thread_data_table_->RemoveAllThreads(this);
}
@@ -1543,6 +1584,7 @@ void Isolate::TearDown() {
void Isolate::GlobalTearDown() {
delete thread_data_table_;
+ thread_data_table_ = NULL;
}
@@ -1552,6 +1594,8 @@ void Isolate::Deinit() {
debug()->Unload();
+ FreeThreadResources();
+
if (concurrent_recompilation_enabled()) {
optimizing_compiler_thread_->Stop();
delete optimizing_compiler_thread_;
@@ -1591,6 +1635,10 @@ void Isolate::Deinit() {
delete runtime_profiler_;
runtime_profiler_ = NULL;
}
+
+ delete basic_block_profiler_;
+ basic_block_profiler_ = NULL;
+
heap_.TearDown();
logger_->TearDown();
@@ -1627,7 +1675,6 @@ void Isolate::PushToPartialSnapshotCache(Object* obj) {
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
- EnsureInitialized();
base::Thread::SetThreadLocal(isolate_key_, isolate);
base::Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
}
@@ -1651,11 +1698,8 @@ Isolate::~Isolate() {
delete date_cache_;
date_cache_ = NULL;
- delete[] code_stub_interface_descriptors_;
- code_stub_interface_descriptors_ = NULL;
-
- delete[] call_descriptors_;
- call_descriptors_ = NULL;
+ delete[] call_descriptor_data_;
+ call_descriptor_data_ = NULL;
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -1830,10 +1874,8 @@ bool Isolate::Init(Deserializer* des) {
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
- code_stub_interface_descriptors_ =
- new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
- call_descriptors_ =
- new CallInterfaceDescriptor[NUMBER_OF_CALL_DESCRIPTORS];
+ call_descriptor_data_ =
+ new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
@@ -1895,7 +1937,7 @@ bool Isolate::Init(Deserializer* des) {
if (max_available_threads_ < 1) {
// Choose the default between 1 and 4.
max_available_threads_ =
- Max(Min(base::OS::NumberOfProcessorsOnline(), 4), 1);
+ Max(Min(base::SysInfo::NumberOfProcessors(), 4), 1);
}
if (!FLAG_job_based_sweeping) {
@@ -1984,26 +2026,7 @@ bool Isolate::Init(Deserializer* des) {
CodeStub::GenerateFPStubs(this);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
StubFailureTrampolineStub::GenerateAheadOfTime(this);
- // Ensure interface descriptors are initialized even when stubs have been
- // deserialized out of the snapshot without using the graph builder.
- FastCloneShallowArrayStub::InstallDescriptors(this);
- BinaryOpICStub::InstallDescriptors(this);
- BinaryOpWithAllocationSiteStub::InstallDescriptors(this);
- CompareNilICStub::InstallDescriptors(this);
- ToBooleanStub::InstallDescriptors(this);
- ToNumberStub::InstallDescriptors(this);
- ArrayConstructorStubBase::InstallDescriptors(this);
- InternalArrayConstructorStubBase::InstallDescriptors(this);
- FastNewClosureStub::InstallDescriptors(this);
- FastNewContextStub::InstallDescriptors(this);
- NumberToStringStub::InstallDescriptors(this);
- StringAddStub::InstallDescriptors(this);
- RegExpConstructResultStub::InstallDescriptors(this);
- KeyedLoadGenericStub::InstallDescriptors(this);
- StoreFieldStub::InstallDescriptors(this);
- }
-
- CallDescriptors::InitializeForIsolate(this);
+ }
initialized_from_snapshot_ = (des != NULL);
@@ -2182,16 +2205,9 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
}
-CodeStubInterfaceDescriptor*
- Isolate::code_stub_interface_descriptor(int index) {
- return code_stub_interface_descriptors_ + index;
-}
-
-
-CallInterfaceDescriptor*
- Isolate::call_descriptor(CallDescriptorKey index) {
- DCHECK(0 <= index && index < NUMBER_OF_CALL_DESCRIPTORS);
- return &call_descriptors_[index];
+CallInterfaceDescriptorData* Isolate::call_descriptor_data(int index) {
+ DCHECK(0 <= index && index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
+ return &call_descriptor_data_[index];
}
@@ -2218,7 +2234,7 @@ Handle<JSObject> Isolate::GetSymbolRegistry() {
static const char* nested[] = {
"for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
};
- for (unsigned i = 0; i < ARRAY_SIZE(nested); ++i) {
+ for (unsigned i = 0; i < arraysize(nested); ++i) {
Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
@@ -2308,14 +2324,13 @@ void Isolate::RunMicrotasks() {
Handle<JSFunction>::cast(microtask);
SaveContext save(this);
set_context(microtask_function->context()->native_context());
- Handle<Object> exception;
- MaybeHandle<Object> result = Execution::TryCall(
- microtask_function, factory()->undefined_value(),
- 0, NULL, &exception);
+ MaybeHandle<Object> maybe_exception;
+ MaybeHandle<Object> result =
+ Execution::TryCall(microtask_function, factory()->undefined_value(),
+ 0, NULL, &maybe_exception);
// If execution is terminating, just bail out.
- if (result.is_null() &&
- !exception.is_null() &&
- *exception == heap()->termination_exception()) {
+ Handle<Object> exception;
+ if (result.is_null() && maybe_exception.is_null()) {
// Clear out any remaining callbacks in the queue.
heap()->set_microtask_queue(heap()->empty_fixed_array());
set_pending_microtask_count(0);
@@ -2347,6 +2362,14 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
}
+BasicBlockProfiler* Isolate::GetOrCreateBasicBlockProfiler() {
+ if (basic_block_profiler_ == NULL) {
+ basic_block_profiler_ = new BasicBlockProfiler();
+ }
+ return basic_block_profiler_;
+}
+
+
bool StackLimitCheck::JsHasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 9ef6fc732a..9ea30743bd 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -20,7 +20,7 @@
#include "src/heap/heap.h"
#include "src/optimizing-compiler-thread.h"
#include "src/regexp-stack.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
#include "src/zone.h"
@@ -32,11 +32,12 @@ class RandomNumberGenerator;
namespace internal {
+class BasicBlockProfiler;
class Bootstrapper;
-class CallInterfaceDescriptor;
+class CallInterfaceDescriptorData;
class CodeGenerator;
class CodeRange;
-class CodeStubInterfaceDescriptor;
+class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
class ConsStringIteratorOp;
@@ -78,6 +79,7 @@ typedef void* ExternalReferenceRedirectorPointer();
class Debug;
class Debugger;
+class PromiseOnStack;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
@@ -133,6 +135,22 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
+#define THROW_NEW_ERROR(isolate, call, T) \
+ do { \
+ Handle<Object> __error__; \
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, __error__, isolate->factory()->call, \
+ T); \
+ return isolate->Throw<T>(__error__); \
+ } while (false)
+
+#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
+ do { \
+ Handle<Object> __error__; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, __error__, \
+ isolate->factory()->call); \
+ return isolate->Throw(*__error__); \
+ } while (false)
+
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
@@ -240,11 +258,7 @@ class ThreadLocalTop BASE_EMBEDDED {
v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
}
- void Free() {
- DCHECK(!has_pending_message_);
- DCHECK(!external_caught_exception_);
- DCHECK(try_catch_handler_ == NULL);
- }
+ void Free();
Isolate* isolate_;
// The context where the current execution method is created and for variable
@@ -270,6 +284,11 @@ class ThreadLocalTop BASE_EMBEDDED {
Address c_entry_fp_; // the frame pointer of the top c entry frame
Address handler_; // try-blocks are chained through the stack
+ // Throwing an exception may cause a Promise rejection. For this purpose
+ // we keep track of a stack of nested promises and the corresponding
+ // try-catch handlers.
+ PromiseOnStack* promise_on_stack_;
+
#ifdef USE_SIMULATOR
Simulator* simulator_;
#endif
@@ -355,9 +374,6 @@ typedef List<HeapObject*> DebugObjectCache;
V(Object*, string_stream_current_security_token, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
- /* AstNode state. */ \
- V(int, ast_node_id, 0) \
- V(unsigned, ast_node_count, 0) \
V(int, pending_microtask_count, 0) \
V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
@@ -451,17 +467,17 @@ class Isolate {
kIsolateAddressCount
};
+ static void InitializeOncePerProcess();
+
// Returns the PerIsolateThreadData for the current thread (or NULL if one is
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
- EnsureInitialized();
return reinterpret_cast<PerIsolateThreadData*>(
base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
- EnsureInitialized();
Isolate* isolate = reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
DCHECK(isolate != NULL);
@@ -469,7 +485,6 @@ class Isolate {
}
INLINE(static Isolate* UncheckedCurrent()) {
- EnsureInitialized();
return reinterpret_cast<Isolate*>(
base::Thread::GetThreadLocal(isolate_key_));
}
@@ -514,13 +529,11 @@ class Isolate {
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
static base::Thread::LocalStorageKey isolate_key() {
- EnsureInitialized();
return isolate_key_;
}
// Returns the key used to store process-wide thread IDs.
static base::Thread::LocalStorageKey thread_id_key() {
- EnsureInitialized();
return thread_id_key_;
}
@@ -676,6 +689,11 @@ class Isolate {
// JavaScript code. If an exception is scheduled true is returned.
bool OptionalRescheduleException(bool is_bottom_call);
+ // Push and pop a promise and the current try-catch handler.
+ void PushPromise(Handle<JSObject> promise);
+ void PopPromise();
+ Handle<Object> GetPromiseOnStackOnThrow();
+
class ExceptionScope {
public:
explicit ExceptionScope(Isolate* isolate) :
@@ -758,7 +776,6 @@ class Isolate {
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
Object* ThrowIllegalOperation();
- Object* ThrowInvalidStringLength();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
Object* PromoteScheduledException();
@@ -1015,19 +1032,7 @@ class Isolate {
bool IsFastArrayConstructorPrototypeChainIntact();
- CodeStubInterfaceDescriptor*
- code_stub_interface_descriptor(int index);
-
- enum CallDescriptorKey {
- KeyedCall,
- NamedCall,
- CallHandler,
- ArgumentAdaptorCall,
- ApiFunctionCall,
- NUMBER_OF_CALL_DESCRIPTORS
- };
-
- CallInterfaceDescriptor* call_descriptor(CallDescriptorKey index);
+ CallInterfaceDescriptorData* call_descriptor_data(int index);
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1103,9 +1108,12 @@ class Isolate {
void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
- private:
- static void EnsureInitialized();
+ BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
+ BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }
+
+ static Isolate* NewForTesting() { return new Isolate(); }
+ private:
Isolate();
friend struct GlobalState;
@@ -1164,8 +1172,7 @@ class Isolate {
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
- // This mutex protects highest_thread_id_ and thread_data_table_.
- static base::LazyMutex process_wide_mutex_;
+ static base::LazyMutex thread_data_table_mutex_;
static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
static base::Thread::LocalStorageKey isolate_key_;
@@ -1263,8 +1270,7 @@ class Isolate {
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
- CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
- CallInterfaceDescriptor* call_descriptors_;
+ CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
// Whether the isolate has been created for snapshotting.
@@ -1325,6 +1331,7 @@ class Isolate {
List<CallCompletedCallback> call_completed_callbacks_;
v8::Isolate::UseCounterCallback use_counter_callback_;
+ BasicBlockProfiler* basic_block_profiler_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
@@ -1349,6 +1356,22 @@ class Isolate {
#undef THREAD_LOCAL_TOP_ACCESSOR
+class PromiseOnStack {
+ public:
+ PromiseOnStack(StackHandler* handler, Handle<JSObject> promise,
+ PromiseOnStack* prev)
+ : handler_(handler), promise_(promise), prev_(prev) {}
+ StackHandler* handler() { return handler_; }
+ Handle<JSObject> promise() { return promise_; }
+ PromiseOnStack* prev() { return prev_; }
+
+ private:
+ StackHandler* handler_;
+ Handle<JSObject> promise_;
+ PromiseOnStack* prev_;
+};
+
+
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
@@ -1467,7 +1490,7 @@ class PostponeInterruptsScope BASE_EMBEDDED {
};
-class CodeTracer V8_FINAL : public Malloced {
+class CodeTracer FINAL : public Malloced {
public:
explicit CodeTracer(int isolate_id)
: file_(NULL),
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index c23e50dbb4..d3148c9e21 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
// A simple json parser.
-template <bool seq_ascii>
+template <bool seq_one_byte>
class JsonParser BASE_EMBEDDED {
public:
MUST_USE_RESULT static MaybeHandle<Object> Parse(Handle<String> source) {
@@ -39,8 +39,8 @@ class JsonParser BASE_EMBEDDED {
source_ = String::Flatten(source_);
pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
- // Optimized fast case where we only have ASCII characters.
- if (seq_ascii) {
+ // Optimized fast case where we only have Latin1 characters.
+ if (seq_one_byte) {
seq_source_ = Handle<SeqOneByteString>::cast(source_);
}
}
@@ -52,7 +52,7 @@ class JsonParser BASE_EMBEDDED {
position_++;
if (position_ >= source_length_) {
c0_ = kEndOfString;
- } else if (seq_ascii) {
+ } else if (seq_one_byte) {
c0_ = seq_source_->SeqOneByteStringGet(position_);
} else {
c0_ = source_->Get(position_);
@@ -103,7 +103,7 @@ class JsonParser BASE_EMBEDDED {
if (source_->length() - position_ - 1 > length) {
DisallowHeapAllocation no_gc;
String::FlatContent content = expected->GetFlatContent();
- if (content.IsAscii()) {
+ if (content.IsOneByte()) {
DCHECK_EQ('"', c0_);
const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
const uint8_t* expected_chars = content.ToOneByteVector().start();
@@ -132,7 +132,7 @@ class JsonParser BASE_EMBEDDED {
Handle<String> ScanJsonString();
// Creates a new string and copies prefix[start..end] into the beginning
// of it. Then scans the rest of the string, adding characters after the
- // prefix. Called by ScanJsonString when reaching a '\' or non-ASCII char.
+ // prefix. Called by ScanJsonString when reaching a '\' or non-Latin1 char.
template <typename StringType, typename SinkChar>
Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
@@ -195,8 +195,8 @@ class JsonParser BASE_EMBEDDED {
int position_;
};
-template <bool seq_ascii>
-MaybeHandle<Object> JsonParser<seq_ascii>::ParseJson() {
+template <bool seq_one_byte>
+MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
// Advance to the first character (possibly EOS)
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
@@ -244,7 +244,9 @@ MaybeHandle<Object> JsonParser<seq_ascii>::ParseJson() {
MessageLocation location(factory->NewScript(source_),
position_,
position_ + 1);
- Handle<Object> error = factory->NewSyntaxError(message, array);
+ Handle<Object> error;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), error,
+ factory->NewSyntaxError(message, array), Object);
return isolate()->template Throw<Object>(error, &location);
}
return result;
@@ -252,8 +254,8 @@ MaybeHandle<Object> JsonParser<seq_ascii>::ParseJson() {
// Parse any JSON value.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
StackLimitCheck stack_check(isolate_);
if (stack_check.HasOverflowed()) {
isolate_->StackOverflow();
@@ -293,8 +295,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
// Parse a JSON object. Position must be right at '{'.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
HandleScope scope(isolate());
Handle<JSObject> json_object =
factory()->NewJSObject(object_constructor(), pretenure_);
@@ -357,7 +359,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// to parse it first.
bool follow_expected = false;
Handle<Map> target;
- if (seq_ascii) {
+ if (seq_one_byte) {
key = Map::ExpectedTransitionKey(map);
follow_expected = !key.is_null() && ParseJsonString(key);
}
@@ -424,8 +426,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
if (value.is_null()) return ReportUnexpectedCharacter();
}
- JSObject::SetOwnPropertyIgnoreAttributes(
- json_object, key, value, NONE).Assert();
+ Runtime::DefineObjectProperty(json_object, key, value, NONE).Check();
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
@@ -447,8 +448,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
}
// Parse a JSON array. Position must be right at '['.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
HandleScope scope(isolate());
ZoneList<Handle<Object> > elements(4, zone());
DCHECK_EQ(c0_, '[');
@@ -477,8 +478,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
}
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
bool negative = false;
int beg_pos = position_;
if (c0_ == '-') {
@@ -521,7 +522,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
}
int length = position_ - beg_pos;
double number;
- if (seq_ascii) {
+ if (seq_one_byte) {
Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
number = StringToDouble(isolate()->unicode_cache(),
chars,
@@ -579,9 +580,9 @@ inline Handle<SeqOneByteString> NewRawString(Factory* factory,
// Scans the rest of a JSON string starting from position_ and writes
// prefix[start..end] along with the scanned characters into a
// sequential string of type StringType.
-template <bool seq_ascii>
+template <bool seq_one_byte>
template <typename StringType, typename SinkChar>
-Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
+Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
Handle<String> prefix, int start, int end) {
int count = end - start;
int max_length = count + source_length_ - position_;
@@ -601,16 +602,15 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
}
if (c0_ != '\\') {
// If the sink can contain UC16 characters, or source_ contains only
- // ASCII characters, there's no need to test whether we can store the
+ // Latin1 characters, there's no need to test whether we can store the
// character. Otherwise check whether the UC16 source character can fit
- // in the ASCII sink.
- if (sizeof(SinkChar) == kUC16Size ||
- seq_ascii ||
+ // in the Latin1 sink.
+ if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
c0_ <= String::kMaxOneByteCharCode) {
SeqStringSet(seq_string, count++, c0_);
Advance();
} else {
- // StringType is SeqOneByteString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-Latin1 char.
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
}
} else {
@@ -651,7 +651,8 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
SeqStringSet(seq_string, count++, value);
break;
} else {
- // StringType is SeqOneByteString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-Latin1
+ // char.
position_ -= 6; // Rewind position_ to \ in \uxxxx.
Advance();
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string,
@@ -675,9 +676,9 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
}
-template <bool seq_ascii>
+template <bool seq_one_byte>
template <bool is_internalized>
-Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
+Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
DCHECK_EQ('"', c0_);
Advance();
if (c0_ == '"') {
@@ -685,7 +686,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
return factory()->empty_string();
}
- if (seq_ascii && is_internalized) {
+ if (seq_one_byte && is_internalized) {
// Fast path for existing internalized strings. If the the string being
// parsed is not a known internalized string, contains backslashes or
// unexpectedly reaches the end of string, return with an empty handle.
@@ -756,12 +757,12 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
}
int beg_pos = position_;
- // Fast case for ASCII only without escape characters.
+ // Fast case for Latin1 only without escape characters.
do {
// Check for control character (0x00-0x1f) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (c0_ != '\\') {
- if (seq_ascii || c0_ <= String::kMaxOneByteCharCode) {
+ if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
Advance();
} else {
return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 81249a7d4a..f89a19fd4a 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -38,22 +38,22 @@ class BasicJsonStringifier BASE_EMBEDDED {
INLINE(void ShrinkCurrentPart());
- template <bool is_ascii, typename Char>
+ template <bool is_one_byte, typename Char>
INLINE(void Append_(Char c));
- template <bool is_ascii, typename Char>
+ template <bool is_one_byte, typename Char>
INLINE(void Append_(const Char* chars));
INLINE(void Append(uint8_t c)) {
- if (is_ascii_) {
+ if (is_one_byte_) {
Append_<true>(c);
} else {
Append_<false>(c);
}
}
- INLINE(void AppendAscii(const char* chars)) {
- if (is_ascii_) {
+ INLINE(void AppendOneByte(const char* chars)) {
+ if (is_one_byte_) {
Append_<true>(reinterpret_cast<const uint8_t*>(chars));
} else {
Append_<false>(reinterpret_cast<const uint8_t*>(chars));
@@ -129,7 +129,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
DestChar* dest,
int length));
- template <bool is_ascii, typename Char>
+ template <bool is_one_byte, typename Char>
INLINE(void SerializeString_(Handle<String> string));
template <typename Char>
@@ -159,7 +159,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
Handle<JSArray> stack_;
int current_index_;
int part_length_;
- bool is_ascii_;
+ bool is_one_byte_;
bool overflowed_;
static const int kJsonEscapeTableEntrySize = 8;
@@ -167,7 +167,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
};
-// Translation table to escape ASCII characters.
+// Translation table to escape Latin1 characters.
// Table entries start at a multiple of 8 and are null-terminated.
const char* const BasicJsonStringifier::JsonEscapeTable =
"\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
@@ -239,7 +239,7 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
: isolate_(isolate),
current_index_(0),
- is_ascii_(true),
+ is_one_byte_(true),
overflowed_(false) {
factory_ = isolate_->factory();
accumulator_store_ = Handle<JSValue>::cast(
@@ -258,8 +258,7 @@ MaybeHandle<Object> BasicJsonStringifier::Stringify(Handle<Object> object) {
ShrinkCurrentPart();
Accumulate();
if (overflowed_) {
- return isolate_->Throw<Object>(
- isolate_->factory()->NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate_, NewInvalidStringLengthError(), Object);
}
return accumulator();
}
@@ -318,9 +317,9 @@ Handle<String> BasicJsonStringifier::StringifyString_(Isolate* isolate,
}
-template <bool is_ascii, typename Char>
+template <bool is_one_byte, typename Char>
void BasicJsonStringifier::Append_(Char c) {
- if (is_ascii) {
+ if (is_one_byte) {
SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
current_index_++, c);
} else {
@@ -331,15 +330,16 @@ void BasicJsonStringifier::Append_(Char c) {
}
-template <bool is_ascii, typename Char>
+template <bool is_one_byte, typename Char>
void BasicJsonStringifier::Append_(const Char* chars) {
- for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars);
+ for (; *chars != '\0'; chars++) Append_<is_one_byte, Char>(*chars);
}
MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
Handle<Object> object, Handle<Object> key) {
- LookupIterator it(object, tojson_string_, LookupIterator::SKIP_INTERCEPTOR);
+ LookupIterator it(object, tojson_string_,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Handle<Object> fun;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
if (!fun->IsJSFunction()) return object;
@@ -371,8 +371,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
for (int i = 0; i < length; i++) {
if (elements->get(i) == *object) {
AllowHeapAllocation allow_to_return_error;
- isolate_->Throw(*factory_->NewTypeError(
- "circular_structure", HandleVector<Object>(NULL, 0)));
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error = factory_->NewTypeError(
+ "circular_structure", HandleVector<Object>(NULL, 0));
+ if (maybe_error.ToHandle(&error)) isolate_->Throw(*error);
return EXCEPTION;
}
}
@@ -414,15 +416,15 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
switch (Oddball::cast(*object)->kind()) {
case Oddball::kFalse:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- AppendAscii("false");
+ AppendOneByte("false");
return SUCCESS;
case Oddball::kTrue:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- AppendAscii("true");
+ AppendOneByte("true");
return SUCCESS;
case Oddball::kNull:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- AppendAscii("null");
+ AppendOneByte("null");
return SUCCESS;
default:
return UNCHANGED;
@@ -509,7 +511,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
DCHECK(class_name == isolate_->heap()->Boolean_string());
Object* value = JSValue::cast(*object)->value();
DCHECK(value->IsBoolean());
- AppendAscii(value->IsTrue() ? "true" : "false");
+ AppendOneByte(value->IsTrue() ? "true" : "false");
}
return SUCCESS;
}
@@ -519,7 +521,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
- AppendAscii(IntToCString(object->value(), buffer));
+ AppendOneByte(IntToCString(object->value(), buffer));
return SUCCESS;
}
@@ -527,13 +529,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
double number) {
if (std::isinf(number) || std::isnan(number)) {
- AppendAscii("null");
+ AppendOneByte("null");
return SUCCESS;
}
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
- AppendAscii(DoubleToCString(number, buffer));
+ AppendOneByte(DoubleToCString(number, buffer));
return SUCCESS;
}
@@ -578,7 +580,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
i);
if (result == SUCCESS) continue;
if (result == UNCHANGED) {
- AppendAscii("null");
+ AppendOneByte("null");
} else {
return result;
}
@@ -611,12 +613,12 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
Object::GetElement(isolate_, object, i),
EXCEPTION);
if (element->IsUndefined()) {
- AppendAscii("null");
+ AppendOneByte("null");
} else {
Result result = SerializeElement(isolate_, element, i);
if (result == SUCCESS) continue;
if (result == UNCHANGED) {
- AppendAscii("null");
+ AppendOneByte("null");
} else {
return result;
}
@@ -729,7 +731,7 @@ void BasicJsonStringifier::Extend() {
if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
part_length_ *= kPartLengthGrowthFactor;
}
- if (is_ascii_) {
+ if (is_one_byte_) {
current_part_ =
factory_->NewRawOneByteString(part_length_).ToHandleChecked();
} else {
@@ -748,7 +750,7 @@ void BasicJsonStringifier::ChangeEncoding() {
factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
DCHECK(!current_part_.is_null());
current_index_ = 0;
- is_ascii_ = false;
+ is_one_byte_ = false;
}
@@ -777,10 +779,10 @@ int BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
}
-template <bool is_ascii, typename Char>
+template <bool is_one_byte, typename Char>
void BasicJsonStringifier::SerializeString_(Handle<String> string) {
int length = string->length();
- Append_<is_ascii, char>('"');
+ Append_<is_one_byte, char>('"');
// We make a rough estimate to find out if the current string can be
// serialized without allocating a new string part. The worst case length of
// an escaped character is 6. Shifting the remainin string length right by 3
@@ -789,7 +791,7 @@ void BasicJsonStringifier::SerializeString_(Handle<String> string) {
if (((part_length_ - current_index_) >> 3) > length) {
DisallowHeapAllocation no_gc;
Vector<const Char> vector = GetCharVector<Char>(string);
- if (is_ascii) {
+ if (is_one_byte) {
current_index_ += SerializeStringUnchecked_(
vector.start(),
SeqOneByteString::cast(*current_part_)->GetChars() + current_index_,
@@ -813,15 +815,15 @@ void BasicJsonStringifier::SerializeString_(Handle<String> string) {
}
Char c = vector[i];
if (DoNotEscape(c)) {
- Append_<is_ascii, Char>(c);
+ Append_<is_one_byte, Char>(c);
} else {
- Append_<is_ascii, uint8_t>(reinterpret_cast<const uint8_t*>(
+ Append_<is_one_byte, uint8_t>(reinterpret_cast<const uint8_t*>(
&JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
}
}
}
- Append_<is_ascii, uint8_t>('"');
+ Append_<is_one_byte, uint8_t>('"');
}
@@ -841,7 +843,7 @@ template <>
Vector<const uint8_t> BasicJsonStringifier::GetCharVector(
Handle<String> string) {
String::FlatContent flat = string->GetFlatContent();
- DCHECK(flat.IsAscii());
+ DCHECK(flat.IsOneByte());
return flat.ToOneByteVector();
}
@@ -856,7 +858,7 @@ Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
void BasicJsonStringifier::SerializeString(Handle<String> object) {
object = String::Flatten(object);
- if (is_ascii_) {
+ if (is_one_byte_) {
if (object->IsOneByteRepresentationUnderneath()) {
SerializeString_<true, uint8_t>(object);
} else {
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 27b8699cf2..b528b4aef5 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -18,7 +18,7 @@
#include "src/regexp-macro-assembler-irregexp.h"
#include "src/regexp-macro-assembler-tracer.h"
#include "src/regexp-stack.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/string-search.h"
#ifndef V8_INTERPRETED_REGEXP
@@ -53,7 +53,7 @@ MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral(
Handle<String> flags) {
// Call the construct code with 2 arguments.
Handle<Object> argv[] = { pattern, flags };
- return Execution::New(constructor, ARRAY_SIZE(argv), argv);
+ return Execution::New(constructor, arraysize(argv), argv);
}
@@ -70,6 +70,9 @@ static JSRegExp::Flags RegExpFlagsFromString(Handle<String> str) {
case 'm':
flags |= JSRegExp::MULTILINE;
break;
+ case 'y':
+ if (FLAG_harmony_regexps) flags |= JSRegExp::STICKY;
+ break;
}
}
return JSRegExp::Flags(flags);
@@ -88,8 +91,8 @@ static inline MaybeHandle<Object> ThrowRegExpException(
elements->set(0, *pattern);
elements->set(1, *error_text);
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
- return isolate->Throw<Object>(regexp_err);
+ Handle<Object> regexp_err;
+ THROW_NEW_ERROR(isolate, NewSyntaxError(message, array), Object);
}
@@ -185,12 +188,14 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
if (parse_result.simple &&
!flags.is_ignore_case() &&
+ !flags.is_sticky() &&
!HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
has_been_compiled = true;
} else if (parse_result.tree->IsAtom() &&
!flags.is_ignore_case() &&
+ !flags.is_sticky() &&
parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
@@ -290,25 +295,18 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
DCHECK(needle_content.IsFlat());
DCHECK(subject_content.IsFlat());
// dispatch on type of strings
- index = (needle_content.IsAscii()
- ? (subject_content.IsAscii()
- ? SearchString(isolate,
- subject_content.ToOneByteVector(),
- needle_content.ToOneByteVector(),
- index)
- : SearchString(isolate,
- subject_content.ToUC16Vector(),
- needle_content.ToOneByteVector(),
- index))
- : (subject_content.IsAscii()
- ? SearchString(isolate,
- subject_content.ToOneByteVector(),
- needle_content.ToUC16Vector(),
- index)
- : SearchString(isolate,
- subject_content.ToUC16Vector(),
- needle_content.ToUC16Vector(),
- index)));
+ index =
+ (needle_content.IsOneByte()
+ ? (subject_content.IsOneByte()
+ ? SearchString(isolate, subject_content.ToOneByteVector(),
+ needle_content.ToOneByteVector(), index)
+ : SearchString(isolate, subject_content.ToUC16Vector(),
+ needle_content.ToOneByteVector(), index))
+ : (subject_content.IsOneByte()
+ ? SearchString(isolate, subject_content.ToOneByteVector(),
+ needle_content.ToUC16Vector(), index)
+ : SearchString(isolate, subject_content.ToUC16Vector(),
+ needle_content.ToUC16Vector(), index)));
if (index == -1) {
return i / 2; // Return number of matches.
} else {
@@ -346,14 +344,15 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
// Irregexp implementation.
// Ensures that the regexp object contains a compiled version of the
-// source for either ASCII or non-ASCII strings.
+// source for either one-byte or two-byte subject strings.
// If the compiled version doesn't already exist, it is compiled
// from the source pattern.
// If compilation fails, an exception is thrown and this function
// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii) {
- Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
+bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ Object* compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
#ifdef V8_INTERPRETED_REGEXP
if (compiled_code->IsByteArray()) return true;
#else // V8_INTERPRETED_REGEXP (RegExp native code)
@@ -361,19 +360,18 @@ bool RegExpImpl::EnsureCompiledIrregexp(
#endif
// We could potentially have marked this as flushable, but have kept
// a saved version if we did not flush it yet.
- Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+ Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
if (saved_code->IsCode()) {
// Reinstate the code in the original place.
- re->SetDataAt(JSRegExp::code_index(is_ascii), saved_code);
+ re->SetDataAt(JSRegExp::code_index(is_one_byte), saved_code);
DCHECK(compiled_code->IsSmi());
return true;
}
- return CompileIrregexp(re, sample_subject, is_ascii);
+ return CompileIrregexp(re, sample_subject, is_one_byte);
}
-static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
- bool is_ascii,
+static void CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
Handle<String> error_message,
Isolate* isolate) {
Factory* factory = isolate->factory();
@@ -381,23 +379,23 @@ static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
elements->set(0, re->Pattern());
elements->set(1, *error_message);
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err =
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error =
factory->NewSyntaxError("malformed_regexp", array);
- isolate->Throw(*regexp_err);
- return false;
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
}
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
Handle<String> sample_subject,
- bool is_ascii) {
+ bool is_one_byte) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
Zone zone(isolate);
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
- Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
+ Object* entry = re->DataAt(JSRegExp::code_index(is_one_byte));
// When arriving here entry can only be a smi, either representing an
// uncompiled regexp, a previous compilation error, or code that has
// been flushed.
@@ -411,10 +409,10 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
// A previous compilation failed and threw an error which we store in
// the saved code index (we store the error message, not the actual
// error). Recreate the error object and throw it.
- Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+ Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
DCHECK(error_string->IsString());
Handle<String> error_message(String::cast(error_string));
- CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
+ CreateRegExpErrorObjectAndThrow(re, error_message, isolate);
return false;
}
@@ -435,25 +433,20 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
"malformed_regexp"));
return false;
}
- RegExpEngine::CompilationResult result =
- RegExpEngine::Compile(&compile_data,
- flags.is_ignore_case(),
- flags.is_global(),
- flags.is_multiline(),
- pattern,
- sample_subject,
- is_ascii,
- &zone);
+ RegExpEngine::CompilationResult result = RegExpEngine::Compile(
+ &compile_data, flags.is_ignore_case(), flags.is_global(),
+ flags.is_multiline(), flags.is_sticky(), pattern, sample_subject,
+ is_one_byte, &zone);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
CStrVector(result.error_message)).ToHandleChecked();
- CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
+ CreateRegExpErrorObjectAndThrow(re, error_message, isolate);
return false;
}
Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
- data->set(JSRegExp::code_index(is_ascii), result.code);
+ data->set(JSRegExp::code_index(is_one_byte), result.code);
int register_max = IrregexpMaxRegisterCount(*data);
if (result.num_registers > register_max) {
SetIrregexpMaxRegisterCount(*data, result.num_registers);
@@ -484,13 +477,13 @@ int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
}
-ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
- return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii)));
+ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_one_byte) {
+ return ByteArray::cast(re->get(JSRegExp::code_index(is_one_byte)));
}
-Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
- return Code::cast(re->get(JSRegExp::code_index(is_ascii)));
+Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_one_byte) {
+ return Code::cast(re->get(JSRegExp::code_index(is_one_byte)));
}
@@ -511,9 +504,9 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject) {
subject = String::Flatten(subject);
- // Check the asciiness of the underlying storage.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
- if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
+ // Check representation of the underlying storage.
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+ if (!EnsureCompiledIrregexp(regexp, subject, is_one_byte)) return -1;
#ifdef V8_INTERPRETED_REGEXP
// Byte-code regexp needs space allocated for all its registers.
@@ -543,13 +536,13 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
DCHECK(index <= subject->length());
DCHECK(subject->IsFlat());
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
#ifndef V8_INTERPRETED_REGEXP
DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
- EnsureCompiledIrregexp(regexp, subject, is_ascii);
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
+ EnsureCompiledIrregexp(regexp, subject, is_one_byte);
+ Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
// The stack is used to allocate registers for the compiled regexp code.
// This means that in case of failure, the output registers array is left
// untouched and contains the capture results from the previous successful
@@ -576,10 +569,10 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
// must restart from scratch.
// In this case, it means we must make sure we are prepared to handle
// the, potentially, different subject (the string can switch between
- // being internal and external, and even between being ASCII and UC16,
+ // being internal and external, and even between being Latin1 and UC16,
// but the characters are always the same).
IrregexpPrepare(regexp, subject);
- is_ascii = subject->IsOneByteRepresentationUnderneath();
+ is_one_byte = subject->IsOneByteRepresentationUnderneath();
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
@@ -597,7 +590,8 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
raw_output[i] = -1;
}
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
+ Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
+ isolate);
IrregexpResult result = IrregexpInterpreter::Match(isolate,
byte_codes,
@@ -998,7 +992,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
- RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii,
+ RegExpCompiler(int capture_count, bool ignore_case, bool is_one_byte,
Zone* zone);
int AllocateRegister() {
@@ -1031,7 +1025,7 @@ class RegExpCompiler {
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
inline bool ignore_case() { return ignore_case_; }
- inline bool ascii() { return ascii_; }
+ inline bool one_byte() { return one_byte_; }
FrequencyCollator* frequency_collator() { return &frequency_collator_; }
int current_expansion_factor() { return current_expansion_factor_; }
@@ -1050,7 +1044,7 @@ class RegExpCompiler {
int recursion_depth_;
RegExpMacroAssembler* macro_assembler_;
bool ignore_case_;
- bool ascii_;
+ bool one_byte_;
bool reg_exp_too_big_;
int current_expansion_factor_;
FrequencyCollator frequency_collator_;
@@ -1076,13 +1070,13 @@ static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii,
- Zone* zone)
+RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case,
+ bool one_byte, Zone* zone)
: next_register_(2 * (capture_count + 1)),
work_list_(NULL),
recursion_depth_(0),
ignore_case_(ignore_case),
- ascii_(ascii),
+ one_byte_(one_byte),
reg_exp_too_big_(false),
current_expansion_factor_(1),
frequency_collator_(),
@@ -1593,9 +1587,8 @@ void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
// Returns the number of characters in the equivalence class, omitting those
// that cannot occur in the source string because it is ASCII.
-static int GetCaseIndependentLetters(Isolate* isolate,
- uc16 character,
- bool ascii_subject,
+static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
+ bool one_byte_subject,
unibrow::uchar* letters) {
int length =
isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
@@ -1605,11 +1598,14 @@ static int GetCaseIndependentLetters(Isolate* isolate,
letters[0] = character;
length = 1;
}
- if (!ascii_subject || character <= String::kMaxOneByteCharCode) {
+ if (!one_byte_subject || character <= String::kMaxOneByteCharCode) {
return length;
}
+
// The standard requires that non-ASCII characters cannot have ASCII
// character codes in their equivalence class.
+ // TODO(dcarney): issue 3550 this is not actually true for Latin1 anymore,
+ // is it? For example, \u00C5 is equivalent to \u212B.
return 0;
}
@@ -1645,18 +1641,19 @@ static inline bool EmitAtomNonLetter(Isolate* isolate,
bool check,
bool preloaded) {
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
+ bool one_byte = compiler->one_byte();
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
+ int length = GetCaseIndependentLetters(isolate, c, one_byte, chars);
if (length < 1) {
- // This can't match. Must be an ASCII subject and a non-ASCII character.
- // We do not need to do anything since the ASCII pass already handled this.
+ // This can't match. Must be an one-byte subject and a non-one-byte
+ // character. We do not need to do anything since the one-byte pass
+ // already handled this.
return false; // Bounds not checked.
}
bool checked = false;
// We handle the length > 1 case in a later pass.
if (length == 1) {
- if (ascii && c > String::kMaxOneByteCharCodeU) {
+ if (one_byte && c > String::kMaxOneByteCharCodeU) {
// Can't match - see above.
return false; // Bounds not checked.
}
@@ -1671,12 +1668,10 @@ static inline bool EmitAtomNonLetter(Isolate* isolate,
static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
- bool ascii,
- uc16 c1,
- uc16 c2,
+ bool one_byte, uc16 c1, uc16 c2,
Label* on_failure) {
uc16 char_mask;
- if (ascii) {
+ if (one_byte) {
char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
@@ -1727,9 +1722,9 @@ static inline bool EmitAtomLetter(Isolate* isolate,
bool check,
bool preloaded) {
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool ascii = compiler->ascii();
+ bool one_byte = compiler->one_byte();
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
+ int length = GetCaseIndependentLetters(isolate, c, one_byte, chars);
if (length <= 1) return false;
// We may not need to check against the end of the input string
// if this character lies before a character that matched.
@@ -1740,11 +1735,8 @@ static inline bool EmitAtomLetter(Isolate* isolate,
DCHECK(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
switch (length) {
case 2: {
- if (ShortCutEmitCharacterPair(macro_assembler,
- ascii,
- chars[0],
- chars[1],
- on_failure)) {
+ if (ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
+ chars[1], on_failure)) {
} else {
macro_assembler->CheckCharacter(chars[0], &ok);
macro_assembler->CheckNotCharacter(chars[1], on_failure);
@@ -1919,7 +1911,7 @@ static void SplitSearchSpace(ZoneList<int>* ranges,
// new_start_index is the index of the first edge that is beyond the
// current kSize space.
- // For very large search spaces we do a binary chop search of the non-ASCII
+ // For very large search spaces we do a binary chop search of the non-Latin1
// space instead of just going to the end of the current kSize space. The
// heuristics are complicated a little by the fact that any 128-character
// encoding space can be quickly tested with a table lookup, so we don't
@@ -1928,14 +1920,13 @@ static void SplitSearchSpace(ZoneList<int>* ranges,
// for example, we only want to match every second character (eg. the lower
// case characters on some Unicode pages).
int binary_chop_index = (end_index + start_index) / 2;
- // The first test ensures that we get to the code that handles the ASCII
+ // The first test ensures that we get to the code that handles the Latin1
// range with a single not-taken branch, speeding up this important
- // character range (even non-ASCII charset-based text has spaces and
+ // character range (even non-Latin1 charset-based text has spaces and
// punctuation).
- if (*border - 1 > String::kMaxOneByteCharCode && // ASCII case.
+ if (*border - 1 > String::kMaxOneByteCharCode && // Latin1 case.
end_index - start_index > (*new_start_index - start_index) * 2 &&
- last - first > kSize * 2 &&
- binary_chop_index > *new_start_index &&
+ last - first > kSize * 2 && binary_chop_index > *new_start_index &&
ranges->at(binary_chop_index) >= first + 2 * kSize) {
int scan_forward_for_section_border = binary_chop_index;;
int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
@@ -2122,20 +2113,16 @@ static void GenerateBranches(RegExpMacroAssembler* masm,
static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
- RegExpCharacterClass* cc,
- bool ascii,
- Label* on_failure,
- int cp_offset,
- bool check_offset,
- bool preloaded,
- Zone* zone) {
+ RegExpCharacterClass* cc, bool one_byte,
+ Label* on_failure, int cp_offset, bool check_offset,
+ bool preloaded, Zone* zone) {
ZoneList<CharacterRange>* ranges = cc->ranges(zone);
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
int max_char;
- if (ascii) {
+ if (one_byte) {
max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
@@ -2456,6 +2443,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
+ Trace* bounds_check_trace,
Trace* trace,
bool preload_has_checked_bounds,
Label* on_possible_success,
@@ -2465,7 +2453,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
GetQuickCheckDetails(
details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE);
if (details->cannot_match()) return false;
- if (!details->Rationalize(compiler->ascii())) return false;
+ if (!details->Rationalize(compiler->one_byte())) return false;
DCHECK(details->characters() == 1 ||
compiler->macro_assembler()->CanReadUnaligned());
uint32_t mask = details->mask();
@@ -2474,8 +2462,13 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
RegExpMacroAssembler* assembler = compiler->macro_assembler();
if (trace->characters_preloaded() != details->characters()) {
+ DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
+ // We are attempting to preload the minimum number of characters
+ // any choice would eat, so if the bounds check fails, then none of the
+ // choices can succeed, so we can just immediately backtrack, rather
+ // than go to the next choice.
assembler->LoadCurrentCharacter(trace->cp_offset(),
- trace->backtrack(),
+ bounds_check_trace->backtrack(),
!preload_has_checked_bounds,
details->characters());
}
@@ -2487,7 +2480,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
// If number of characters preloaded is 1 then we used a byte or 16 bit
// load so the value is already masked down.
uint32_t char_mask;
- if (compiler->ascii()) {
+ if (compiler->one_byte()) {
char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
@@ -2495,11 +2488,11 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
if ((mask & char_mask) == char_mask) need_mask = false;
mask &= char_mask;
} else {
- // For 2-character preloads in ASCII mode or 1-character preloads in
- // TWO_BYTE mode we also use a 16 bit load with zero extend.
- if (details->characters() == 2 && compiler->ascii()) {
+ // For 2-character preloads in one-byte mode or 1-character preloads in
+ // two-byte mode we also use a 16 bit load with zero extend.
+ if (details->characters() == 2 && compiler->one_byte()) {
if ((mask & 0xffff) == 0xffff) need_mask = false;
- } else if (details->characters() == 1 && !compiler->ascii()) {
+ } else if (details->characters() == 1 && !compiler->one_byte()) {
if ((mask & 0xffff) == 0xffff) need_mask = false;
} else {
if (mask == 0xffffffff) need_mask = false;
@@ -2539,7 +2532,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
DCHECK(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
- if (compiler->ascii()) {
+ if (compiler->one_byte()) {
char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
@@ -2553,18 +2546,20 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
details->positions(characters_filled_in);
uc16 c = quarks[i];
if (c > char_mask) {
- // If we expect a non-ASCII character from an ASCII string,
- // there is no way we can match. Not even case independent
- // matching can turn an ASCII character into non-ASCII or
+ // If we expect a non-Latin1 character from an one-byte string,
+ // there is no way we can match. Not even case-independent
+ // matching can turn an Latin1 character into non-Latin1 or
// vice versa.
+ // TODO(dcarney): issue 3550. Verify that this works as expected.
+ // For example, \u0178 is uppercase of \u00ff (y-umlaut).
details->set_cannot_match();
pos->determines_perfectly = false;
return;
}
if (compiler->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(),
- chars);
+ int length = GetCaseIndependentLetters(isolate, c,
+ compiler->one_byte(), chars);
DCHECK(length != 0); // Can only happen if c > char_mask (see above).
if (length == 1) {
// This letter has no case equivalents, so it's nice and simple
@@ -2693,7 +2688,7 @@ void QuickCheckDetails::Clear() {
}
-void QuickCheckDetails::Advance(int by, bool ascii) {
+void QuickCheckDetails::Advance(int by, bool one_byte) {
DCHECK(by >= 0);
if (by >= characters_) {
Clear();
@@ -2757,7 +2752,7 @@ class VisitMarker {
};
-RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -2767,7 +2762,7 @@ RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) {
RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) {
- RegExpNode* next = on_success_->FilterASCII(depth - 1, ignore_case);
+ RegExpNode* next = on_success_->FilterOneByte(depth - 1, ignore_case);
if (next == NULL) return set_replacement(NULL);
on_success_ = next;
return set_replacement(this);
@@ -2791,7 +2786,7 @@ static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
}
-RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -2845,7 +2840,7 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
}
-RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2853,17 +2848,17 @@ RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) {
VisitMarker marker(info());
RegExpNode* continue_replacement =
- continue_node_->FilterASCII(depth - 1, ignore_case);
+ continue_node_->FilterOneByte(depth - 1, ignore_case);
// If we can't continue after the loop then there is no sense in doing the
// loop.
if (continue_replacement == NULL) return set_replacement(NULL);
}
- return ChoiceNode::FilterASCII(depth - 1, ignore_case);
+ return ChoiceNode::FilterOneByte(depth - 1, ignore_case);
}
-RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2883,7 +2878,7 @@ RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
for (int i = 0; i < choice_count; i++) {
GuardedAlternative alternative = alternatives_->at(i);
RegExpNode* replacement =
- alternative.node()->FilterASCII(depth - 1, ignore_case);
+ alternative.node()->FilterOneByte(depth - 1, ignore_case);
DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK.
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
@@ -2903,7 +2898,7 @@ RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
- alternatives_->at(i).node()->FilterASCII(depth - 1, ignore_case);
+ alternatives_->at(i).node()->FilterOneByte(depth - 1, ignore_case);
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
new_alternatives->Add(alternatives_->at(i), zone());
@@ -2914,8 +2909,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
}
-RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth,
- bool ignore_case) {
+RegExpNode* NegativeLookaheadChoiceNode::FilterOneByte(int depth,
+ bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2923,12 +2918,12 @@ RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth,
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterASCII(depth - 1, ignore_case);
+ RegExpNode* replacement = node->FilterOneByte(depth - 1, ignore_case);
if (replacement == NULL) return set_replacement(NULL);
alternatives_->at(1).set_node(replacement);
RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1, ignore_case);
+ RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1, ignore_case);
// If the negative lookahead is always going to fail then
// we don't need to check it.
if (neg_replacement == NULL) return set_replacement(replacement);
@@ -3037,7 +3032,7 @@ static void EmitHat(RegExpCompiler* compiler,
if (!assembler->CheckSpecialCharacterClass('n',
new_trace.backtrack())) {
// Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->ascii()) {
+ if (!compiler->one_byte()) {
assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
}
assembler->CheckCharacter('\n', &ok);
@@ -3235,7 +3230,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int* checked_up_to) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
Isolate* isolate = assembler->zone()->isolate();
- bool ascii = compiler->ascii();
+ bool one_byte = compiler->one_byte();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
int element_count = elms_->length();
@@ -3249,8 +3244,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
EmitCharacterFunction* emit_function = NULL;
switch (pass) {
- case NON_ASCII_MATCH:
- DCHECK(ascii);
+ case NON_LATIN1_MATCH:
+ DCHECK(one_byte);
if (quarks[j] > String::kMaxOneByteCharCode) {
assembler->GoTo(backtrack);
return;
@@ -3285,14 +3280,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (first_element_checked && i == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
RegExpCharacterClass* cc = elm.char_class();
- EmitCharClass(assembler,
- cc,
- ascii,
- backtrack,
- cp_offset,
- *checked_up_to < cp_offset,
- preloaded,
- zone());
+ EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
+ *checked_up_to < cp_offset, preloaded, zone());
UpdateBoundsCheck(cp_offset, checked_up_to);
}
}
@@ -3333,9 +3322,9 @@ void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
return;
}
- if (compiler->ascii()) {
+ if (compiler->one_byte()) {
int dummy = 0;
- TextEmitPass(compiler, NON_ASCII_MATCH, false, trace, false, &dummy);
+ TextEmitPass(compiler, NON_LATIN1_MATCH, false, trace, false, &dummy);
}
bool first_elt_done = false;
@@ -3391,7 +3380,7 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
// Adjust the offsets of the quick check performed information. This
// information is used to find out what we already determined about the
// characters by means of mask and compare.
- quick_check_performed_.Advance(by, compiler->ascii());
+ quick_check_performed_.Advance(by, compiler->one_byte());
cp_offset_ += by;
if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
compiler->SetRegExpTooBig();
@@ -3401,7 +3390,7 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
-void TextNode::MakeCaseIndependent(bool is_ascii) {
+void TextNode::MakeCaseIndependent(bool is_one_byte) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
@@ -3413,7 +3402,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
int range_count = ranges->length();
for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(ranges, is_ascii, zone());
+ ranges->at(j).AddCaseEquivalents(ranges, is_one_byte, zone());
}
}
}
@@ -3441,7 +3430,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
}
if (ranges->length() != 1) return NULL;
uint32_t max_char;
- if (compiler->ascii()) {
+ if (compiler->one_byte()) {
max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
@@ -3494,6 +3483,7 @@ void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
if (trace->stop_node() == this) {
+ // Back edge of greedy optimized loop node graph.
int text_length =
GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
DCHECK(text_length != kNodeIsTooComplexForGreedyLoops);
@@ -3517,8 +3507,8 @@ int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
int eats_at_least) {
int preload_characters = Min(4, eats_at_least);
if (compiler->macro_assembler()->CanReadUnaligned()) {
- bool ascii = compiler->ascii();
- if (ascii) {
+ bool one_byte = compiler->one_byte();
+ if (one_byte) {
if (preload_characters > 4) preload_characters = 4;
// We can't preload 3 characters because there is no machine instruction
// to do that. We can't just load 4 because we could be reading
@@ -3588,18 +3578,18 @@ static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
0xFEFF, 0xFF00, 0x10000 };
-static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
+static const int kSpaceRangeCount = arraysize(kSpaceRanges);
static const int kWordRanges[] = {
'0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000 };
-static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
+static const int kWordRangeCount = arraysize(kWordRanges);
static const int kDigitRanges[] = { '0', '9' + 1, 0x10000 };
-static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
+static const int kDigitRangeCount = arraysize(kDigitRanges);
static const int kSurrogateRanges[] = { 0xd800, 0xe000, 0x10000 };
-static const int kSurrogateRangeCount = ARRAY_SIZE(kSurrogateRanges);
+static const int kSurrogateRangeCount = arraysize(kSurrogateRanges);
static const int kLineTerminatorRanges[] = { 0x000A, 0x000B, 0x000D, 0x000E,
0x2028, 0x202A, 0x10000 };
-static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
+static const int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
void BoyerMoorePositionInfo::Set(int character) {
@@ -3644,7 +3634,7 @@ BoyerMooreLookahead::BoyerMooreLookahead(
int length, RegExpCompiler* compiler, Zone* zone)
: length_(length),
compiler_(compiler) {
- if (compiler->ascii()) {
+ if (compiler->one_byte()) {
max_char_ = String::kMaxOneByteCharCode;
} else {
max_char_ = String::kMaxUtf16CodeUnit;
@@ -3712,8 +3702,9 @@ int BoyerMooreLookahead::FindBestInterval(
// dividing by 2 we switch off the skipping if the probability of skipping
// is less than 50%. This is because the multibyte mask-and-compare
// skipping in quickcheck is more likely to do well on this case.
- bool in_quickcheck_range = ((i - remembered_from < 4) ||
- (compiler_->ascii() ? remembered_from <= 4 : remembered_from <= 2));
+ bool in_quickcheck_range =
+ ((i - remembered_from < 4) ||
+ (compiler_->one_byte() ? remembered_from <= 4 : remembered_from <= 2));
// Called 'probability' but it is only a rough estimate and can actually
// be outside the 0-kSize range.
int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
@@ -3760,13 +3751,13 @@ int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
// See comment above on the implementation of GetSkipTable.
-bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
+void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
const int kSize = RegExpMacroAssembler::kTableSize;
int min_lookahead = 0;
int max_lookahead = 0;
- if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return false;
+ if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return;
bool found_single_character = false;
int single_character = 0;
@@ -3790,7 +3781,7 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
// The mask-compare can probably handle this better.
- return false;
+ return;
}
if (found_single_character) {
@@ -3807,7 +3798,7 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
masm->AdvanceCurrentPosition(lookahead_width);
masm->GoTo(&again);
masm->Bind(&cont);
- return true;
+ return;
}
Factory* factory = masm->zone()->isolate()->factory();
@@ -3823,8 +3814,6 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
masm->AdvanceCurrentPosition(skip_distance);
masm->GoTo(&again);
masm->Bind(&cont);
-
- return true;
}
@@ -3878,10 +3867,12 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
* \ F V
* \-----S4
*
- * For greedy loops we reverse our expectation and expect to match rather
- * than fail. Therefore we want the loop code to look like this (U is the
- * unwind code that steps back in the greedy loop). The following alternatives
- * look the same as above.
+ * For greedy loops we push the current position, then generate the code that
+ * eats the input specially in EmitGreedyLoop. The other choice (the
+ * continuation) is generated by the normal code in EmitChoices, and steps back
+ * in the input to the starting position when it fails to match. The loop code
+ * looks like this (U is the unwind code that steps back in the greedy loop).
+ *
* _____
* / \
* V |
@@ -3890,26 +3881,25 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
* / |S |
* F/ \_____/
* /
- * |<-----------
- * | \
- * V \
- * Q2 ---> S2 \
- * | S / |
- * F| / |
- * | F/ |
- * | / |
- * | R |
- * | / |
- * F VL |
- * <------U |
- * back |S |
- * \______________/
+ * |<-----
+ * | \
+ * V |S
+ * Q2 ---> U----->backtrack
+ * | F /
+ * S| /
+ * V F /
+ * S2--/
*/
-void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- int choice_count = alternatives_->length();
+GreedyLoopState::GreedyLoopState(bool not_at_start) {
+ counter_backtrack_trace_.set_backtrack(&label_);
+ if (not_at_start) counter_backtrack_trace_.set_at_start(false);
+}
+
+
+void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
#ifdef DEBUG
+ int choice_count = alternatives_->length();
for (int i = 0; i < choice_count - 1; i++) {
GuardedAlternative alternative = alternatives_->at(i);
ZoneList<Guard*>* guards = alternative.guards();
@@ -3919,12 +3909,38 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
}
}
#endif
+}
+
+
+void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
+ Trace* current_trace,
+ PreloadState* state) {
+ if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
+ // Save some time by looking at most one machine word ahead.
+ state->eats_at_least_ =
+ EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
+ current_trace->at_start() == Trace::FALSE_VALUE);
+ }
+ state->preload_characters_ =
+ CalculatePreloadCharacters(compiler, state->eats_at_least_);
+
+ state->preload_is_current_ =
+ (current_trace->characters_preloaded() == state->preload_characters_);
+ state->preload_has_checked_bounds_ = state->preload_is_current_;
+}
+
+
+void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ int choice_count = alternatives_->length();
+
+ AssertGuardsMentionRegisters(trace);
LimitResult limit_result = LimitVersions(compiler, trace);
if (limit_result == DONE) return;
DCHECK(limit_result == CONTINUE);
- int new_flush_budget = trace->flush_budget() / choice_count;
+ // For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
+ // other choice nodes we only flush if we are out of code size budget.
if (trace->flush_budget() == 0 && trace->actions() != NULL) {
trace->Flush(compiler, this);
return;
@@ -3932,141 +3948,217 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
- Trace* current_trace = trace;
+ PreloadState preload;
+ preload.init();
+ GreedyLoopState greedy_loop_state(not_at_start());
- int text_length = GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
- bool greedy_loop = false;
- Label greedy_loop_label;
- Trace counter_backtrack_trace;
- counter_backtrack_trace.set_backtrack(&greedy_loop_label);
- if (not_at_start()) counter_backtrack_trace.set_at_start(false);
+ int text_length = GreedyLoopTextLengthForAlternative(&alternatives_->at(0));
+ AlternativeGenerationList alt_gens(choice_count, zone());
if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
- // Here we have special handling for greedy loops containing only text nodes
- // and other simple nodes. These are handled by pushing the current
- // position on the stack and then incrementing the current position each
- // time around the switch. On backtrack we decrement the current position
- // and check it against the pushed value. This avoids pushing backtrack
- // information for each iteration of the loop, which could take up a lot of
- // space.
- greedy_loop = true;
- DCHECK(trace->stop_node() == NULL);
- macro_assembler->PushCurrentPosition();
- current_trace = &counter_backtrack_trace;
- Label greedy_match_failed;
- Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(false);
- greedy_match_trace.set_backtrack(&greedy_match_failed);
- Label loop_label;
- macro_assembler->Bind(&loop_label);
- greedy_match_trace.set_stop_node(this);
- greedy_match_trace.set_loop_label(&loop_label);
- alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
- macro_assembler->Bind(&greedy_match_failed);
+ trace = EmitGreedyLoop(compiler,
+ trace,
+ &alt_gens,
+ &preload,
+ &greedy_loop_state,
+ text_length);
+ } else {
+ // TODO(erikcorry): Delete this. We don't need this label, but it makes us
+ // match the traces produced pre-cleanup.
+ Label second_choice;
+ compiler->macro_assembler()->Bind(&second_choice);
+
+ preload.eats_at_least_ = EmitOptimizedUnanchoredSearch(compiler, trace);
+
+ EmitChoices(compiler,
+ &alt_gens,
+ 0,
+ trace,
+ &preload);
+ }
+
+ // At this point we need to generate slow checks for the alternatives where
+ // the quick check was inlined. We can recognize these because the associated
+ // label was bound.
+ int new_flush_budget = trace->flush_budget() / choice_count;
+ for (int i = 0; i < choice_count; i++) {
+ AlternativeGeneration* alt_gen = alt_gens.at(i);
+ Trace new_trace(*trace);
+ // If there are actions to be flushed we have to limit how many times
+ // they are flushed. Take the budget of the parent trace and distribute
+ // it fairly amongst the children.
+ if (new_trace.actions() != NULL) {
+ new_trace.set_flush_budget(new_flush_budget);
+ }
+ bool next_expects_preload =
+ i == choice_count - 1 ? false : alt_gens.at(i + 1)->expects_preload;
+ EmitOutOfLineContinuation(compiler,
+ &new_trace,
+ alternatives_->at(i),
+ alt_gen,
+ preload.preload_characters_,
+ next_expects_preload);
}
+}
+
+
+Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
+ Trace* trace,
+ AlternativeGenerationList* alt_gens,
+ PreloadState* preload,
+ GreedyLoopState* greedy_loop_state,
+ int text_length) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ // Here we have special handling for greedy loops containing only text nodes
+ // and other simple nodes. These are handled by pushing the current
+ // position on the stack and then incrementing the current position each
+ // time around the switch. On backtrack we decrement the current position
+ // and check it against the pushed value. This avoids pushing backtrack
+ // information for each iteration of the loop, which could take up a lot of
+ // space.
+ DCHECK(trace->stop_node() == NULL);
+ macro_assembler->PushCurrentPosition();
+ Label greedy_match_failed;
+ Trace greedy_match_trace;
+ if (not_at_start()) greedy_match_trace.set_at_start(false);
+ greedy_match_trace.set_backtrack(&greedy_match_failed);
+ Label loop_label;
+ macro_assembler->Bind(&loop_label);
+ greedy_match_trace.set_stop_node(this);
+ greedy_match_trace.set_loop_label(&loop_label);
+ alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
+ macro_assembler->Bind(&greedy_match_failed);
Label second_choice; // For use in greedy matches.
macro_assembler->Bind(&second_choice);
- int first_normal_choice = greedy_loop ? 1 : 0;
-
- bool not_at_start = current_trace->at_start() == Trace::FALSE_VALUE;
- const int kEatsAtLeastNotYetInitialized = -1;
- int eats_at_least = kEatsAtLeastNotYetInitialized;
-
- bool skip_was_emitted = false;
-
- if (!greedy_loop && choice_count == 2) {
- GuardedAlternative alt1 = alternatives_->at(1);
- if (alt1.guards() == NULL || alt1.guards()->length() == 0) {
- RegExpNode* eats_anything_node = alt1.node();
- if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) ==
- this) {
- // At this point we know that we are at a non-greedy loop that will eat
- // any character one at a time. Any non-anchored regexp has such a
- // loop prepended to it in order to find where it starts. We look for
- // a pattern of the form ...abc... where we can look 6 characters ahead
- // and step forwards 3 if the character is not one of abc. Abc need
- // not be atoms, they can be any reasonably limited character class or
- // small alternation.
- DCHECK(trace->is_trivial()); // This is the case on LoopChoiceNodes.
- BoyerMooreLookahead* lookahead = bm_info(not_at_start);
- if (lookahead == NULL) {
- eats_at_least = Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- not_at_start));
- if (eats_at_least >= 1) {
- BoyerMooreLookahead* bm =
- new(zone()) BoyerMooreLookahead(eats_at_least,
- compiler,
- zone());
- GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
- skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
- }
- } else {
- skip_was_emitted = lookahead->EmitSkipInstructions(macro_assembler);
- }
- }
- }
+ Trace* new_trace = greedy_loop_state->counter_backtrack_trace();
+
+ EmitChoices(compiler,
+ alt_gens,
+ 1,
+ new_trace,
+ preload);
+
+ macro_assembler->Bind(greedy_loop_state->label());
+ // If we have unwound to the bottom then backtrack.
+ macro_assembler->CheckGreedyLoop(trace->backtrack());
+ // Otherwise try the second priority at an earlier position.
+ macro_assembler->AdvanceCurrentPosition(-text_length);
+ macro_assembler->GoTo(&second_choice);
+ return new_trace;
+}
+
+int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
+ Trace* trace) {
+ int eats_at_least = PreloadState::kEatsAtLeastNotYetInitialized;
+ if (alternatives_->length() != 2) return eats_at_least;
+
+ GuardedAlternative alt1 = alternatives_->at(1);
+ if (alt1.guards() != NULL && alt1.guards()->length() != 0) {
+ return eats_at_least;
}
+ RegExpNode* eats_anything_node = alt1.node();
+ if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) != this) {
+ return eats_at_least;
+ }
+
+ // Really we should be creating a new trace when we execute this function,
+ // but there is no need, because the code it generates cannot backtrack, and
+ // we always arrive here with a trivial trace (since it's the entry to a
+ // loop. That also implies that there are no preloaded characters, which is
+ // good, because it means we won't be violating any assumptions by
+ // overwriting those characters with new load instructions.
+ DCHECK(trace->is_trivial());
- if (eats_at_least == kEatsAtLeastNotYetInitialized) {
- // Save some time by looking at most one machine word ahead.
- eats_at_least =
- EatsAtLeast(compiler->ascii() ? 4 : 2, kRecursionBudget, not_at_start);
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ // At this point we know that we are at a non-greedy loop that will eat
+ // any character one at a time. Any non-anchored regexp has such a
+ // loop prepended to it in order to find where it starts. We look for
+ // a pattern of the form ...abc... where we can look 6 characters ahead
+ // and step forwards 3 if the character is not one of abc. Abc need
+ // not be atoms, they can be any reasonably limited character class or
+ // small alternation.
+ BoyerMooreLookahead* bm = bm_info(false);
+ if (bm == NULL) {
+ eats_at_least = Min(kMaxLookaheadForBoyerMoore,
+ EatsAtLeast(kMaxLookaheadForBoyerMoore,
+ kRecursionBudget,
+ false));
+ if (eats_at_least >= 1) {
+ bm = new(zone()) BoyerMooreLookahead(eats_at_least,
+ compiler,
+ zone());
+ GuardedAlternative alt0 = alternatives_->at(0);
+ alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, false);
+ }
+ }
+ if (bm != NULL) {
+ bm->EmitSkipInstructions(macro_assembler);
}
- int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least);
+ return eats_at_least;
+}
- bool preload_is_current = !skip_was_emitted &&
- (current_trace->characters_preloaded() == preload_characters);
- bool preload_has_checked_bounds = preload_is_current;
- AlternativeGenerationList alt_gens(choice_count, zone());
+void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
+ AlternativeGenerationList* alt_gens,
+ int first_choice,
+ Trace* trace,
+ PreloadState* preload) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ SetUpPreLoad(compiler, trace, preload);
// For now we just call all choices one after the other. The idea ultimately
// is to use the Dispatch table to try only the relevant ones.
- for (int i = first_normal_choice; i < choice_count; i++) {
+ int choice_count = alternatives_->length();
+
+ int new_flush_budget = trace->flush_budget() / choice_count;
+
+ for (int i = first_choice; i < choice_count; i++) {
+ bool is_last = i == choice_count - 1;
+ bool fall_through_on_failure = !is_last;
GuardedAlternative alternative = alternatives_->at(i);
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- alt_gen->quick_check_details.set_characters(preload_characters);
+ AlternativeGeneration* alt_gen = alt_gens->at(i);
+ alt_gen->quick_check_details.set_characters(preload->preload_characters_);
ZoneList<Guard*>* guards = alternative.guards();
int guard_count = (guards == NULL) ? 0 : guards->length();
- Trace new_trace(*current_trace);
- new_trace.set_characters_preloaded(preload_is_current ?
- preload_characters :
+ Trace new_trace(*trace);
+ new_trace.set_characters_preloaded(preload->preload_is_current_ ?
+ preload->preload_characters_ :
0);
- if (preload_has_checked_bounds) {
- new_trace.set_bound_checked_up_to(preload_characters);
+ if (preload->preload_has_checked_bounds_) {
+ new_trace.set_bound_checked_up_to(preload->preload_characters_);
}
new_trace.quick_check_performed()->Clear();
if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
- alt_gen->expects_preload = preload_is_current;
+ if (!is_last) {
+ new_trace.set_backtrack(&alt_gen->after);
+ }
+ alt_gen->expects_preload = preload->preload_is_current_;
bool generate_full_check_inline = false;
if (FLAG_regexp_optimization &&
- try_to_emit_quick_check_for_alternative(i) &&
+ try_to_emit_quick_check_for_alternative(i == 0) &&
alternative.node()->EmitQuickCheck(compiler,
+ trace,
&new_trace,
- preload_has_checked_bounds,
+ preload->preload_has_checked_bounds_,
&alt_gen->possible_success,
&alt_gen->quick_check_details,
- i < choice_count - 1)) {
+ fall_through_on_failure)) {
// Quick check was generated for this choice.
- preload_is_current = true;
- preload_has_checked_bounds = true;
- // On the last choice in the ChoiceNode we generated the quick
- // check to fall through on possible success. So now we need to
- // generate the full check inline.
- if (i == choice_count - 1) {
+ preload->preload_is_current_ = true;
+ preload->preload_has_checked_bounds_ = true;
+ // If we generated the quick check to fall through on possible success,
+ // we now need to generate the full check inline.
+ if (!fall_through_on_failure) {
macro_assembler->Bind(&alt_gen->possible_success);
new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- new_trace.set_characters_preloaded(preload_characters);
- new_trace.set_bound_checked_up_to(preload_characters);
+ new_trace.set_characters_preloaded(preload->preload_characters_);
+ new_trace.set_bound_checked_up_to(preload->preload_characters_);
generate_full_check_inline = true;
}
} else if (alt_gen->quick_check_details.cannot_match()) {
- if (i == choice_count - 1 && !greedy_loop) {
+ if (!fall_through_on_failure) {
macro_assembler->GoTo(trace->backtrack());
}
continue;
@@ -4076,13 +4168,10 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// previous cases that go here when they fail. There's no reason to
// insist that they preload characters since the slow check we are about
// to generate probably can't use it.
- if (i != first_normal_choice) {
+ if (i != first_choice) {
alt_gen->expects_preload = false;
new_trace.InvalidateCurrentCharacter();
}
- if (i < choice_count - 1) {
- new_trace.set_backtrack(&alt_gen->after);
- }
generate_full_check_inline = true;
}
if (generate_full_check_inline) {
@@ -4093,38 +4182,10 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
GenerateGuard(macro_assembler, guards->at(j), &new_trace);
}
alternative.node()->Emit(compiler, &new_trace);
- preload_is_current = false;
+ preload->preload_is_current_ = false;
}
macro_assembler->Bind(&alt_gen->after);
}
- if (greedy_loop) {
- macro_assembler->Bind(&greedy_loop_label);
- // If we have unwound to the bottom then backtrack.
- macro_assembler->CheckGreedyLoop(trace->backtrack());
- // Otherwise try the second priority at an earlier position.
- macro_assembler->AdvanceCurrentPosition(-text_length);
- macro_assembler->GoTo(&second_choice);
- }
-
- // At this point we need to generate slow checks for the alternatives where
- // the quick check was inlined. We can recognize these because the associated
- // label was bound.
- for (int i = first_normal_choice; i < choice_count - 1; i++) {
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- Trace new_trace(*current_trace);
- // If there are actions to be flushed we have to limit how many times
- // they are flushed. Take the budget of the parent trace and distribute
- // it fairly amongst the children.
- if (new_trace.actions() != NULL) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- EmitOutOfLineContinuation(compiler,
- &new_trace,
- alternatives_->at(i),
- alt_gen,
- preload_characters,
- alt_gens.at(i + 1)->expects_preload);
- }
}
@@ -5272,12 +5333,11 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
- bool is_ascii,
- Zone* zone) {
+ bool is_one_byte, Zone* zone) {
Isolate* isolate = zone->isolate();
uc16 bottom = from();
uc16 top = to();
- if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
+ if (is_one_byte && !RangeContainsLatin1Equivalents(*this)) {
if (bottom > String::kMaxOneByteCharCode) return;
if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
}
@@ -5696,7 +5756,7 @@ void TextNode::CalculateOffsets() {
void Analysis::VisitText(TextNode* that) {
if (ignore_case_) {
- that->MakeCaseIndependent(is_ascii_);
+ that->MakeCaseIndependent(is_one_byte_);
}
EnsureAnalyzed(that->on_success());
if (!has_failed()) {
@@ -5972,18 +6032,13 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
RegExpEngine::CompilationResult RegExpEngine::Compile(
- RegExpCompileData* data,
- bool ignore_case,
- bool is_global,
- bool is_multiline,
- Handle<String> pattern,
- Handle<String> sample_subject,
- bool is_ascii,
- Zone* zone) {
+ RegExpCompileData* data, bool ignore_case, bool is_global,
+ bool is_multiline, bool is_sticky, Handle<String> pattern,
+ Handle<String> sample_subject, bool is_one_byte, Zone* zone) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
return IrregexpRegExpTooBig(zone->isolate());
}
- RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
+ RegExpCompiler compiler(data->capture_count, ignore_case, is_one_byte, zone);
// Sample some characters from the middle of the string.
static const int kSampleSize = 128;
@@ -6006,9 +6061,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
bool is_end_anchored = data->tree->IsAnchoredAtEnd();
bool is_start_anchored = data->tree->IsAnchoredAtStart();
int max_length = data->tree->max_match();
- if (!is_start_anchored) {
+ if (!is_start_anchored && !is_sticky) {
// Add a .*? at the beginning, outside the body capture, unless
- // this expression is anchored at the beginning.
+ // this expression is anchored at the beginning or sticky.
RegExpNode* loop_node =
RegExpQuantifier::ToNode(0,
RegExpTree::kInfinity,
@@ -6030,18 +6085,18 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
node = loop_node;
}
}
- if (is_ascii) {
- node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
+ if (is_one_byte) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
// Do it again to propagate the new nodes to places where they were not
// put because they had not been calculated yet.
if (node != NULL) {
- node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
}
}
if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
- Analysis analysis(ignore_case, is_ascii);
+ Analysis analysis(ignore_case, is_one_byte);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
@@ -6053,8 +6108,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
// Native regexp implementation.
NativeRegExpMacroAssembler::Mode mode =
- is_ascii ? NativeRegExpMacroAssembler::ASCII
- : NativeRegExpMacroAssembler::UC16;
+ is_one_byte ? NativeRegExpMacroAssembler::LATIN1
+ : NativeRegExpMacroAssembler::UC16;
#if V8_TARGET_ARCH_IA32
RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2,
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 11bad24d4b..c65adea4c5 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -204,8 +204,8 @@ class RegExpImpl {
static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
static int IrregexpNumberOfCaptures(FixedArray* re);
static int IrregexpNumberOfRegisters(FixedArray* re);
- static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
- static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
+ static ByteArray* IrregexpByteCode(FixedArray* re, bool is_one_byte);
+ static Code* IrregexpNativeCode(FixedArray* re, bool is_one_byte);
// Limit the space regexps take up on the heap. In order to limit this we
// would like to keep track of the amount of regexp code on the heap. This
@@ -216,10 +216,11 @@ class RegExpImpl {
static const int kRegWxpCompiledLimit = 1 * MB;
private:
- static bool CompileIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
- static inline bool EnsureCompiledIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
+ static bool CompileIrregexp(Handle<JSRegExp> re,
+ Handle<String> sample_subject, bool is_one_byte);
+ static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte);
};
@@ -262,7 +263,7 @@ class CharacterRange {
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii,
+ void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_one_byte,
Zone* zone);
static void Split(ZoneList<CharacterRange>* base,
Vector<const int> overlay,
@@ -406,7 +407,7 @@ FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
#undef FORWARD_DECLARE
-class TextElement V8_FINAL BASE_EMBEDDED {
+class TextElement FINAL BASE_EMBEDDED {
public:
enum TextType {
ATOM,
@@ -445,7 +446,9 @@ class TextElement V8_FINAL BASE_EMBEDDED {
class Trace;
-
+struct PreloadState;
+class GreedyLoopState;
+class AlternativeGenerationList;
struct NodeInfo {
NodeInfo()
@@ -524,11 +527,11 @@ class QuickCheckDetails {
mask_(0),
value_(0),
cannot_match_(false) { }
- bool Rationalize(bool ascii);
+ bool Rationalize(bool one_byte);
// Merge in the information from another branch of an alternation.
void Merge(QuickCheckDetails* other, int from_index);
// Advance the current position by some amount.
- void Advance(int by, bool ascii);
+ void Advance(int by, bool one_byte);
void Clear();
bool cannot_match() { return cannot_match_; }
void set_cannot_match() { cannot_match_ = true; }
@@ -587,6 +590,7 @@ class RegExpNode: public ZoneObject {
// Falls through on certain failure, jumps to the label on possible success.
// If the node cannot make a quick check it does nothing and returns false.
bool EmitQuickCheck(RegExpCompiler* compiler,
+ Trace* bounds_check_trace,
Trace* trace,
bool preload_has_checked_bounds,
Label* on_possible_success,
@@ -622,11 +626,13 @@ class RegExpNode: public ZoneObject {
UNREACHABLE();
}
- // If we know that the input is ASCII then there are some nodes that can
+ // If we know that the input is one-byte then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or NULL if the node can never match.
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; }
- // Helper for FilterASCII.
+ virtual RegExpNode* FilterOneByte(int depth, bool ignore_case) {
+ return this;
+ }
+ // Helper for FilterOneByte.
RegExpNode* replacement() {
DCHECK(info()->replacement_calculated);
return replacement_;
@@ -720,7 +726,7 @@ class SeqRegExpNode: public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
virtual void FillInBMInfo(int offset,
int budget,
BoyerMooreLookahead* bm,
@@ -841,7 +847,7 @@ class TextNode: public SeqRegExpNode {
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
- void MakeCaseIndependent(bool is_ascii);
+ void MakeCaseIndependent(bool is_one_byte);
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
@@ -850,11 +856,11 @@ class TextNode: public SeqRegExpNode {
BoyerMooreLookahead* bm,
bool not_at_start);
void CalculateOffsets();
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
private:
enum TextEmitPassType {
- NON_ASCII_MATCH, // Check for characters that can't match.
+ NON_LATIN1_MATCH, // Check for characters that can't match.
SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
CASE_CHARACTER_MATCH, // Case-independent single character check.
@@ -1076,8 +1082,10 @@ class ChoiceNode: public RegExpNode {
bool not_at_start() { return not_at_start_; }
void set_not_at_start() { not_at_start_ = true; }
void set_being_calculated(bool b) { being_calculated_ = b; }
- virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+ virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+ return true;
+ }
+ virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
protected:
int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1096,6 +1104,22 @@ class ChoiceNode: public RegExpNode {
AlternativeGeneration* alt_gen,
int preload_characters,
bool next_expects_preload);
+ void SetUpPreLoad(RegExpCompiler* compiler,
+ Trace* current_trace,
+ PreloadState* preloads);
+ void AssertGuardsMentionRegisters(Trace* trace);
+ int EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler, Trace* trace);
+ Trace* EmitGreedyLoop(RegExpCompiler* compiler,
+ Trace* trace,
+ AlternativeGenerationList* alt_gens,
+ PreloadState* preloads,
+ GreedyLoopState* greedy_loop_state,
+ int text_length);
+ void EmitChoices(RegExpCompiler* compiler,
+ AlternativeGenerationList* alt_gens,
+ int first_choice,
+ Trace* trace,
+ PreloadState* preloads);
DispatchTable* table_;
// If true, this node is never checked at the start of the input.
// Allows a new trace to start with at_start() set to false.
@@ -1131,8 +1155,10 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
// starts by loading enough characters for the alternative that takes fewest
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
- virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+ virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+ return !is_first;
+ }
+ virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
};
@@ -1142,7 +1168,8 @@ class LoopChoiceNode: public ChoiceNode {
: ChoiceNode(2, zone),
loop_node_(NULL),
continue_node_(NULL),
- body_can_be_zero_length_(body_can_be_zero_length) { }
+ body_can_be_zero_length_(body_can_be_zero_length)
+ { }
void AddLoopAlternative(GuardedAlternative alt);
void AddContinueAlternative(GuardedAlternative alt);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -1159,7 +1186,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
virtual void Accept(NodeVisitor* visitor);
- virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+ virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
private:
// AddAlternative is made private for loop nodes because alternatives
@@ -1293,7 +1320,7 @@ class BoyerMooreLookahead : public ZoneObject {
void SetRest(int from_map) {
for (int i = from_map; i < length_; i++) SetAll(i);
}
- bool EmitSkipInstructions(RegExpMacroAssembler* masm);
+ void EmitSkipInstructions(RegExpMacroAssembler* masm);
private:
// This is the value obtained by EatsAtLeast. If we do not have at least this
@@ -1302,7 +1329,7 @@ class BoyerMooreLookahead : public ZoneObject {
// point.
int length_;
RegExpCompiler* compiler_;
- // 0x7f for ASCII, 0xffff for UTF-16.
+ // 0xff for Latin1, 0xffff for UTF-16.
int max_char_;
ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
@@ -1486,6 +1513,31 @@ class Trace {
};
+class GreedyLoopState {
+ public:
+ explicit GreedyLoopState(bool not_at_start);
+
+ Label* label() { return &label_; }
+ Trace* counter_backtrack_trace() { return &counter_backtrack_trace_; }
+
+ private:
+ Label label_;
+ Trace counter_backtrack_trace_;
+};
+
+
+struct PreloadState {
+ static const int kEatsAtLeastNotYetInitialized = -1;
+ bool preload_is_current_;
+ bool preload_has_checked_bounds_;
+ int preload_characters_;
+ int eats_at_least_;
+ void init() {
+ eats_at_least_ = kEatsAtLeastNotYetInitialized;
+ }
+};
+
+
class NodeVisitor {
public:
virtual ~NodeVisitor() { }
@@ -1546,10 +1598,10 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
- Analysis(bool ignore_case, bool is_ascii)
+ Analysis(bool ignore_case, bool is_one_byte)
: ignore_case_(ignore_case),
- is_ascii_(is_ascii),
- error_message_(NULL) { }
+ is_one_byte_(is_one_byte),
+ error_message_(NULL) {}
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@@ -1569,7 +1621,7 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
private:
bool ignore_case_;
- bool is_ascii_;
+ bool is_one_byte_;
const char* error_message_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
@@ -1608,13 +1660,11 @@ class RegExpEngine: public AllStatic {
int num_registers;
};
- static CompilationResult Compile(RegExpCompileData* input,
- bool ignore_case,
- bool global,
- bool multiline,
+ static CompilationResult Compile(RegExpCompileData* input, bool ignore_case,
+ bool global, bool multiline, bool sticky,
Handle<String> pattern,
Handle<String> sample_subject,
- bool is_ascii, Zone* zone);
+ bool is_one_byte, Zone* zone);
static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
};
diff --git a/deps/v8/src/libplatform/default-platform-unittest.cc b/deps/v8/src/libplatform/default-platform-unittest.cc
new file mode 100644
index 0000000000..d2c160e558
--- /dev/null
+++ b/deps/v8/src/libplatform/default-platform-unittest.cc
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-platform.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+ virtual ~MockTask() { Die(); }
+ MOCK_METHOD0(Run, void());
+ MOCK_METHOD0(Die, void());
+};
+
+} // namespace
+
+
+TEST(DefaultPlatformTest, PumpMessageLoop) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatform platform;
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ platform.CallOnForegroundThread(isolate, task);
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+}
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 9a503bc34c..b5b8571dbc 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/base/sys-info.h"
#include "src/libplatform/worker-thread.h"
namespace v8 {
@@ -58,8 +59,9 @@ DefaultPlatform::~DefaultPlatform() {
void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
base::LockGuard<base::Mutex> guard(&lock_);
DCHECK(thread_pool_size >= 0);
- if (thread_pool_size < 1)
- thread_pool_size = base::OS::NumberOfProcessorsOnline();
+ if (thread_pool_size < 1) {
+ thread_pool_size = base::SysInfo::NumberOfProcessors();
+ }
thread_pool_size_ =
std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
}
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index fcbb14c36c..1efd7b24fe 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -34,9 +34,9 @@ class DefaultPlatform : public Platform {
// v8::Platform implementation.
virtual void CallOnBackgroundThread(
- Task* task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
+ Task* task, ExpectedRuntime expected_runtime) OVERRIDE;
virtual void CallOnForegroundThread(v8::Isolate* isolate,
- Task* task) V8_OVERRIDE;
+ Task* task) OVERRIDE;
private:
static const int kMaxThreadPoolSize;
diff --git a/deps/v8/src/libplatform/libplatform.gyp b/deps/v8/src/libplatform/libplatform.gyp
new file mode 100644
index 0000000000..4321da723a
--- /dev/null
+++ b/deps/v8/src/libplatform/libplatform.gyp
@@ -0,0 +1,39 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'libplatform-unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ '../../testing/gtest.gyp:gtest',
+ '../../testing/gmock.gyp:gmock',
+ '../../testing/gmock.gyp:gmock_main',
+ '../../tools/gyp/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'default-platform-unittest.cc',
+ 'task-queue-unittest.cc',
+ 'worker-thread-unittest.cc',
+ ],
+ 'conditions': [
+ ['os_posix == 1', {
+ # TODO(svenpanne): This is a temporary work-around to fix the warnings
+ # that show up because we use -std=gnu++0x instead of -std=c++11.
+ 'cflags!': [
+ '-pedantic',
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/src/libplatform/task-queue-unittest.cc b/deps/v8/src/libplatform/task-queue-unittest.cc
new file mode 100644
index 0000000000..9a186589f7
--- /dev/null
+++ b/deps/v8/src/libplatform/task-queue-unittest.cc
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "src/base/platform/platform.h"
+#include "src/libplatform/task-queue.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::IsNull;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+ MOCK_METHOD0(Run, void());
+};
+
+
+class TaskQueueThread FINAL : public base::Thread {
+ public:
+ explicit TaskQueueThread(TaskQueue* queue)
+ : Thread(Options("libplatform TaskQueueThread")), queue_(queue) {}
+
+ virtual void Run() OVERRIDE { EXPECT_THAT(queue_->GetNext(), IsNull()); }
+
+ private:
+ TaskQueue* queue_;
+};
+
+} // namespace
+
+
+TEST(TaskQueueTest, Basic) {
+ TaskQueue queue;
+ MockTask task;
+ queue.Append(&task);
+ EXPECT_EQ(&task, queue.GetNext());
+ queue.Terminate();
+ EXPECT_THAT(queue.GetNext(), IsNull());
+}
+
+
+TEST(TaskQueueTest, TerminateMultipleReaders) {
+ TaskQueue queue;
+ TaskQueueThread thread1(&queue);
+ TaskQueueThread thread2(&queue);
+ thread1.Start();
+ thread2.Start();
+ queue.Terminate();
+ thread1.Join();
+ thread2.Join();
+}
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/worker-thread-unittest.cc b/deps/v8/src/libplatform/worker-thread-unittest.cc
new file mode 100644
index 0000000000..175b311666
--- /dev/null
+++ b/deps/v8/src/libplatform/worker-thread-unittest.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "src/libplatform/task-queue.h"
+#include "src/libplatform/worker-thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::IsNull;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+ virtual ~MockTask() { Die(); }
+ MOCK_METHOD0(Run, void());
+ MOCK_METHOD0(Die, void());
+};
+
+} // namespace
+
+
+TEST(WorkerThreadTest, Basic) {
+ static const size_t kNumTasks = 10;
+
+ TaskQueue queue;
+ for (size_t i = 0; i < kNumTasks; ++i) {
+ InSequence s;
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ queue.Append(task);
+ }
+
+ WorkerThread thread1(&queue);
+ WorkerThread thread2(&queue);
+
+ // TaskQueue DCHECKS that it's empty in its destructor.
+ queue.Terminate();
+}
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/worker-thread.h b/deps/v8/src/libplatform/worker-thread.h
index 5550f16a7f..67f086d8ab 100644
--- a/deps/v8/src/libplatform/worker-thread.h
+++ b/deps/v8/src/libplatform/worker-thread.h
@@ -22,7 +22,7 @@ class WorkerThread : public base::Thread {
virtual ~WorkerThread();
// Thread implementation.
- virtual void Run() V8_OVERRIDE;
+ virtual void Run() OVERRIDE;
private:
friend class QuitTask;
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 8350c80bbf..5f4f17f16f 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -108,8 +108,8 @@ LiveRange::LiveRange(int id, Zone* zone)
current_interval_(NULL),
last_processed_use_(NULL),
current_hint_operand_(NULL),
- spill_operand_(new(zone) LOperand()),
- spill_start_index_(kMaxInt) { }
+ spill_operand_(new (zone) LOperand()),
+ spill_start_index_(kMaxInt) {}
void LiveRange::set_assigned_register(int reg, Zone* zone) {
@@ -527,7 +527,7 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
num_registers_(-1),
graph_(graph),
has_osr_entry_(false),
- allocation_ok_(true) { }
+ allocation_ok_(true) {}
void LAllocator::InitializeLivenessAnalysis() {
@@ -1016,7 +1016,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
for (int i = 0; i < phis->length(); ++i) {
HPhi* phi = phis->at(i);
LUnallocated* phi_operand =
- new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
+ new (chunk()->zone()) LUnallocated(LUnallocated::NONE);
phi_operand->set_virtual_register(phi->id());
for (int j = 0; j < phi->OperandCount(); ++j) {
HValue* op = phi->OperandAt(j);
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 8b6444db58..ea6b83a06a 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -132,7 +132,7 @@ void LCodeGenBase::CheckEnvironmentUsage() {
void LCodeGenBase::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ StringBuilder builder(buffer, arraysize(buffer));
va_list arguments;
va_start(arguments, format);
builder.AddFormattedList(format, arguments);
@@ -147,6 +147,15 @@ void LCodeGenBase::Comment(const char* format, ...) {
}
+void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) {
+ OStringStream os;
+ os << ";;; deoptimize at " << HSourcePosition(reason.raw_position) << " "
+ << reason.mnemonic;
+ if (reason.detail != NULL) os << ": " << reason.detail;
+ Comment("%s", os.c_str());
+}
+
+
int LCodeGenBase::GetNextEmittedBlock() const {
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
if (!graph()->blocks()->at(i)->IsReachable()) continue;
@@ -217,19 +226,25 @@ void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
void LCodeGenBase::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
+ info()->AbortOptimization(reason);
+ status_ = ABORTED;
+}
+
+
+void LCodeGenBase::Retry(BailoutReason reason) {
+ info()->RetryOptimization(reason);
status_ = ABORTED;
}
void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) {
- if (map->is_deprecated()) return Abort(kMapBecameDeprecated);
+ if (map->is_deprecated()) return Retry(kMapBecameDeprecated);
chunk_->AddDeprecationDependency(map);
}
void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
- if (!map->is_stable()) return Abort(kMapBecameUnstable);
+ if (!map->is_stable()) return Retry(kMapBecameUnstable);
chunk_->AddStabilityDependency(map);
}
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index 1eb963e6fa..40d4d8e4f8 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -7,7 +7,9 @@
#include "src/v8.h"
+#include "src/bailout-reason.h"
#include "src/compiler.h"
+#include "src/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -33,6 +35,7 @@ class LCodeGenBase BASE_EMBEDDED {
HGraph* graph() const;
void FPRINTF_CHECKING Comment(const char* format, ...);
+ void DeoptComment(const Deoptimizer::Reason& reason);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
@@ -74,6 +77,7 @@ class LCodeGenBase BASE_EMBEDDED {
bool is_aborted() const { return status_ == ABORTED; }
void Abort(BailoutReason reason);
+ void Retry(BailoutReason reason);
// Methods for code dependencies.
void AddDeprecationDependency(Handle<Map> map);
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index a8d4d22ab5..7d992a1eb8 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/lithium.h"
+
#include "src/v8.h"
-#include "src/lithium.h"
#include "src/scopes.h"
#include "src/serialize.h"
@@ -145,6 +146,7 @@ void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
delete[] cache;
+ cache = NULL;
}
@@ -436,7 +438,7 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
int values = graph->GetMaximumValueID();
CompilationInfo* info = graph->info();
if (values > LUnallocated::kMaxVirtualRegisters) {
- info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
+ info->AbortOptimization(kNotEnoughVirtualRegistersForValues);
return NULL;
}
LAllocator allocator(values, graph);
@@ -445,7 +447,7 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
if (chunk == NULL) return NULL;
if (!allocator.Allocate(chunk)) {
- info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
+ info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return NULL;
}
@@ -509,19 +511,35 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
}
+void LChunkBuilderBase::Abort(BailoutReason reason) {
+ info()->AbortOptimization(reason);
+ status_ = ABORTED;
+}
+
+
+void LChunkBuilderBase::Retry(BailoutReason reason) {
+ info()->RetryOptimization(reason);
+ status_ = ABORTED;
+}
+
+
LEnvironment* LChunkBuilderBase::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
+ HEnvironment* hydrogen_env, int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
DCHECK(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
+
+ int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
+ ? 0
+ : hydrogen_env->specials_count();
+
+ int value_count = hydrogen_env->length() - omitted_count;
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
hydrogen_env->frame_type(),
@@ -537,8 +555,10 @@ LEnvironment* LChunkBuilderBase::CreateEnvironment(
// Store the environment description into the environment
// (with holes for nested objects)
for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
+ if (hydrogen_env->is_special_index(i) &&
+ hydrogen_env->frame_type() != JS_FUNCTION) {
+ continue;
+ }
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 032c1d4290..83f760d672 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -8,6 +8,7 @@
#include <set>
#include "src/allocation.h"
+#include "src/bailout-reason.h"
#include "src/hydrogen.h"
#include "src/safepoint-table.h"
#include "src/zone-allocator.h"
@@ -254,7 +255,7 @@ class LUnallocated : public LOperand {
};
-class LMoveOperands V8_FINAL BASE_EMBEDDED {
+class LMoveOperands FINAL BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
@@ -302,7 +303,7 @@ class LMoveOperands V8_FINAL BASE_EMBEDDED {
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
-class LSubKindOperand V8_FINAL : public LOperand {
+class LSubKindOperand FINAL : public LOperand {
public:
static LSubKindOperand* Create(int index, Zone* zone) {
DCHECK(index >= 0);
@@ -332,7 +333,7 @@ LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
-class LParallelMove V8_FINAL : public ZoneObject {
+class LParallelMove FINAL : public ZoneObject {
public:
explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
@@ -351,7 +352,7 @@ class LParallelMove V8_FINAL : public ZoneObject {
};
-class LPointerMap V8_FINAL : public ZoneObject {
+class LPointerMap FINAL : public ZoneObject {
public:
explicit LPointerMap(Zone* zone)
: pointer_operands_(8, zone),
@@ -384,7 +385,7 @@ class LPointerMap V8_FINAL : public ZoneObject {
};
-class LEnvironment V8_FINAL : public ZoneObject {
+class LEnvironment FINAL : public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
@@ -534,7 +535,7 @@ class LEnvironment V8_FINAL : public ZoneObject {
// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator V8_FINAL BASE_EMBEDDED {
+class ShallowIterator FINAL BASE_EMBEDDED {
public:
explicit ShallowIterator(LEnvironment* env)
: env_(env),
@@ -578,7 +579,7 @@ class ShallowIterator V8_FINAL BASE_EMBEDDED {
// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator V8_FINAL BASE_EMBEDDED {
+class DeepIterator FINAL BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
: current_iterator_(env) {
@@ -697,13 +698,34 @@ class LChunk : public ZoneObject {
class LChunkBuilderBase BASE_EMBEDDED {
public:
- explicit LChunkBuilderBase(Zone* zone)
+ explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph)
: argument_count_(0),
- zone_(zone) { }
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ zone_(graph->zone()) {}
virtual ~LChunkBuilderBase() { }
+ void Abort(BailoutReason reason);
+ void Retry(BailoutReason reason);
+
protected:
+ enum Status { UNUSED, BUILDING, DONE, ABORTED };
+
+ LPlatformChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+ int argument_count() const { return argument_count_; }
+ Isolate* isolate() const { return graph_->isolate(); }
+ Heap* heap() const { return isolate()->heap(); }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
@@ -718,6 +740,10 @@ class LChunkBuilderBase BASE_EMBEDDED {
Zone* zone() const { return zone_; }
int argument_count_;
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
private:
Zone* zone_;
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 57258b0c51..a87c31bac1 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -649,15 +649,15 @@ Handle<Code> FunctionInfoWrapper::GetFunctionCode() {
}
-Handle<FixedArray> FunctionInfoWrapper::GetFeedbackVector() {
+Handle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
- Handle<FixedArray> result;
+ Handle<TypeFeedbackVector> result;
if (element->IsJSValue()) {
Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(raw_result);
- result = Handle<FixedArray>(shared->feedback_vector(), isolate());
+ result = Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
CHECK_EQ(result->length(), GetSlotCount());
} else {
// Scripts may never have a SharedFunctionInfo created, so
@@ -875,11 +875,11 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
Factory* factory = isolate->factory();
Handle<String> start_pos_key = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("startPosition"));
- Handle<String> end_pos_key = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("endPosition"));
- Handle<String> script_obj_key = factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("scriptObject"));
+ STATIC_CHAR_VECTOR("startPosition"));
+ Handle<String> end_pos_key =
+ factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("endPosition"));
+ Handle<String> script_obj_key =
+ factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptObject"));
Handle<Smi> start_pos(
Smi::FromInt(message_location.start_pos()), isolate);
Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
@@ -1203,7 +1203,7 @@ void LiveEdit::ReplaceFunctionCode(
}
shared_info->DisableOptimization(kLiveEdit);
// Update the type feedback vector
- Handle<FixedArray> feedback_vector =
+ Handle<TypeFeedbackVector> feedback_vector =
compile_info_wrapper.GetFeedbackVector();
shared_info->set_feedback_vector(*feedback_vector);
}
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 3465d886d7..53418b0918 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -307,7 +307,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<Code> GetFunctionCode();
- Handle<FixedArray> GetFeedbackVector();
+ Handle<TypeFeedbackVector> GetFeedbackVector();
Handle<Object> GetCodeScopeInfo();
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 0c6c4355aa..86f5ce0967 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
+#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -1221,8 +1222,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- if (code == isolate_->builtins()->builtin(Builtins::kCompileUnoptimized))
- return;
+ if (code == isolate_->builtins()->builtin(Builtins::kCompileLazy)) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
@@ -1755,8 +1755,7 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (code_objects[i].is_identical_to(
- isolate_->builtins()->CompileUnoptimized()))
+ if (code_objects[i].is_identical_to(isolate_->builtins()->CompileLazy()))
continue;
LogExistingFunction(sfis[i], code_objects[i]);
}
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
index ccd1fbb14d..d4777a0baa 100644
--- a/deps/v8/src/lookup-inl.h
+++ b/deps/v8/src/lookup-inl.h
@@ -19,8 +19,11 @@ JSReceiver* LookupIterator::NextHolder(Map* map) {
DCHECK(!next->map()->IsGlobalObjectMap() ||
next->map()->is_hidden_prototype());
- if (!check_derived() &&
- !(check_hidden() && next->map()->is_hidden_prototype())) {
+ if (!check_prototype_chain() &&
+ !(check_hidden() && next->map()->is_hidden_prototype()) &&
+ // Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
+ // when not checking other hidden prototypes.
+ !map->IsJSGlobalProxyMap()) {
return NULL;
}
@@ -28,16 +31,14 @@ JSReceiver* LookupIterator::NextHolder(Map* map) {
}
-LookupIterator::State LookupIterator::LookupInHolder(Map* map) {
+LookupIterator::State LookupIterator::LookupInHolder(Map* map,
+ JSReceiver* holder) {
+ STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
DisallowHeapAllocation no_gc;
switch (state_) {
case NOT_FOUND:
- if (map->IsJSProxyMap()) {
- return JSPROXY;
- }
- if (check_access_check() && map->is_access_check_needed()) {
- return ACCESS_CHECK;
- }
+ if (map->IsJSProxyMap()) return JSPROXY;
+ if (map->is_access_check_needed()) return ACCESS_CHECK;
// Fall through.
case ACCESS_CHECK:
if (check_interceptor() && map->has_named_interceptor()) {
@@ -46,17 +47,35 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* map) {
// Fall through.
case INTERCEPTOR:
if (map->is_dictionary_map()) {
- property_encoding_ = DICTIONARY;
+ NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
+ number_ = dict->FindEntry(name_);
+ if (number_ == NameDictionary::kNotFound) return NOT_FOUND;
+ property_details_ = dict->DetailsAt(number_);
+ if (holder->IsGlobalObject()) {
+ if (property_details_.IsDeleted()) return NOT_FOUND;
+ PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
+ if (cell->value()->IsTheHole()) return NOT_FOUND;
+ }
} else {
DescriptorArray* descriptors = map->instance_descriptors();
number_ = descriptors->SearchWithCache(*name_, map);
if (number_ == DescriptorArray::kNotFound) return NOT_FOUND;
- property_encoding_ = DESCRIPTOR;
+ property_details_ = descriptors->GetDetails(number_);
+ }
+ has_property_ = true;
+ switch (property_details_.type()) {
+ case v8::internal::CONSTANT:
+ case v8::internal::FIELD:
+ case v8::internal::NORMAL:
+ return DATA;
+ case v8::internal::CALLBACKS:
+ return ACCESSOR;
}
- return PROPERTY;
- case PROPERTY:
+ case ACCESSOR:
+ case DATA:
return NOT_FOUND;
case JSPROXY:
+ case TRANSITION:
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 967ce498ac..b855abe97f 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/bootstrapper.h"
+#include "src/deoptimizer.h"
#include "src/lookup.h"
#include "src/lookup-inl.h"
@@ -13,46 +14,56 @@ namespace internal {
void LookupIterator::Next() {
+ DCHECK_NE(JSPROXY, state_);
+ DCHECK_NE(TRANSITION, state_);
DisallowHeapAllocation no_gc;
has_property_ = false;
- JSReceiver* holder = NULL;
+ JSReceiver* holder = *holder_;
Map* map = *holder_map_;
// Perform lookup on current holder.
- state_ = LookupInHolder(map);
+ state_ = LookupInHolder(map, holder);
+ if (IsFound()) return;
// Continue lookup if lookup on current holder failed.
- while (!IsFound()) {
+ do {
JSReceiver* maybe_holder = NextHolder(map);
if (maybe_holder == NULL) break;
holder = maybe_holder;
map = holder->map();
- state_ = LookupInHolder(map);
- }
+ state_ = LookupInHolder(map, holder);
+ } while (!IsFound());
- // Either was found in the receiver, or the receiver has no prototype.
- if (holder == NULL) return;
-
- maybe_holder_ = handle(holder);
- holder_map_ = handle(map);
+ if (holder != *holder_) {
+ holder_ = handle(holder, isolate_);
+ holder_map_ = handle(map, isolate_);
+ }
}
Handle<JSReceiver> LookupIterator::GetRoot() const {
- Handle<Object> receiver = GetReceiver();
- if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ if (receiver_->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver_);
Handle<Object> root =
- handle(receiver->GetRootMap(isolate_)->prototype(), isolate_);
+ handle(receiver_->GetRootMap(isolate_)->prototype(), isolate_);
CHECK(!root->IsNull());
return Handle<JSReceiver>::cast(root);
}
Handle<Map> LookupIterator::GetReceiverMap() const {
- Handle<Object> receiver = GetReceiver();
- if (receiver->IsNumber()) return isolate_->factory()->heap_number_map();
- return handle(Handle<HeapObject>::cast(receiver)->map());
+ if (receiver_->IsNumber()) return isolate_->factory()->heap_number_map();
+ return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
+}
+
+
+Handle<JSObject> LookupIterator::GetStoreTarget() const {
+ if (receiver_->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate(), receiver_);
+ if (iter.IsAtEnd()) return Handle<JSGlobalProxy>::cast(receiver_);
+ return Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter));
+ }
+ return Handle<JSObject>::cast(receiver_);
}
@@ -63,110 +74,137 @@ bool LookupIterator::IsBootstrapping() const {
bool LookupIterator::HasAccess(v8::AccessType access_type) const {
DCHECK_EQ(ACCESS_CHECK, state_);
- DCHECK(is_guaranteed_to_have_holder());
return isolate_->MayNamedAccess(GetHolder<JSObject>(), name_, access_type);
}
-bool LookupIterator::HasProperty() {
- DCHECK_EQ(PROPERTY, state_);
- DCHECK(is_guaranteed_to_have_holder());
-
- if (property_encoding_ == DICTIONARY) {
- Handle<JSObject> holder = GetHolder<JSObject>();
- number_ = holder->property_dictionary()->FindEntry(name_);
- if (number_ == NameDictionary::kNotFound) return false;
-
- property_details_ = holder->property_dictionary()->DetailsAt(number_);
- // Holes in dictionary cells are absent values.
- if (holder->IsGlobalObject() &&
- (property_details_.IsDeleted() || FetchValue()->IsTheHole())) {
- return false;
- }
- } else {
- // Can't use descriptor_number() yet because has_property_ is still false.
- property_details_ =
- holder_map_->instance_descriptors()->GetDetails(number_);
- }
-
- switch (property_details_.type()) {
- case v8::internal::FIELD:
- case v8::internal::NORMAL:
- case v8::internal::CONSTANT:
- property_kind_ = DATA;
- break;
- case v8::internal::CALLBACKS:
- property_kind_ = ACCESSOR;
- break;
- case v8::internal::HANDLER:
- case v8::internal::NONEXISTENT:
- case v8::internal::INTERCEPTOR:
- UNREACHABLE();
- }
-
- has_property_ = true;
- return true;
+void LookupIterator::ReloadPropertyInformation() {
+ state_ = BEFORE_PROPERTY;
+ state_ = LookupInHolder(*holder_map_, *holder_);
+ DCHECK(IsFound() || holder_map_->is_dictionary_map());
}
void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
- DCHECK(has_property_);
+ DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
- if (property_encoding_ == DICTIONARY) return;
+ if (holder_map_->is_dictionary_map()) return;
holder_map_ =
Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
JSObject::MigrateToMap(GetHolder<JSObject>(), holder_map_);
- // Reload property information.
+ ReloadPropertyInformation();
+}
+
+
+void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
+ PropertyAttributes attributes) {
+ DCHECK(state_ == DATA || state_ == ACCESSOR);
+ DCHECK(HolderIsReceiverOrHiddenPrototype());
+ Handle<JSObject> holder = GetHolder<JSObject>();
if (holder_map_->is_dictionary_map()) {
- property_encoding_ = DICTIONARY;
+ PropertyDetails details(attributes, NORMAL, 0);
+ JSObject::SetNormalizedProperty(holder, name(), value, details);
} else {
- property_encoding_ = DESCRIPTOR;
+ holder_map_ = Map::ReconfigureDataProperty(holder_map_, descriptor_number(),
+ attributes);
+ JSObject::MigrateToMap(holder, holder_map_);
}
- CHECK(HasProperty());
+
+ ReloadPropertyInformation();
}
-void LookupIterator::TransitionToDataProperty(
+void LookupIterator::PrepareTransitionToDataProperty(
Handle<Object> value, PropertyAttributes attributes,
Object::StoreFromKeyed store_mode) {
- DCHECK(!has_property_ || !HolderIsReceiverOrHiddenPrototype());
+ if (state_ == TRANSITION) return;
+ DCHECK(state_ != LookupIterator::ACCESSOR ||
+ GetAccessors()->IsDeclaredAccessorInfo());
+ DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
// observable.
- Handle<JSObject> receiver = Handle<JSObject>::cast(GetReceiver());
-
- // Properties have to be added to context extension objects through
- // SetOwnPropertyIgnoreAttributes.
- DCHECK(!receiver->IsJSContextExtensionObject());
+ Handle<JSObject> receiver = GetStoreTarget();
- if (receiver->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate(), receiver);
- receiver =
- Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter));
+ if (!name().is_identical_to(isolate()->factory()->hidden_string()) &&
+ !receiver->map()->is_extensible()) {
+ return;
}
- maybe_holder_ = receiver;
- holder_map_ = Map::TransitionToDataProperty(handle(receiver->map()), name_,
- value, attributes, store_mode);
+ transition_map_ = Map::TransitionToDataProperty(
+ handle(receiver->map(), isolate_), name_, value, attributes, store_mode);
+ state_ = TRANSITION;
+}
+
+
+void LookupIterator::ApplyTransitionToDataProperty() {
+ DCHECK_EQ(TRANSITION, state_);
+
+ Handle<JSObject> receiver = GetStoreTarget();
+ holder_ = receiver;
+ holder_map_ = transition_map_;
+ JSObject::MigrateToMap(receiver, holder_map_);
+ ReloadPropertyInformation();
+}
+
+
+void LookupIterator::TransitionToAccessorProperty(
+ AccessorComponent component, Handle<Object> accessor,
+ PropertyAttributes attributes) {
+ DCHECK(!accessor->IsNull());
+ // Can only be called when the receiver is a JSObject. JSProxy has to be
+ // handled via a trap. Adding properties to primitive values is not
+ // observable.
+ Handle<JSObject> receiver = GetStoreTarget();
+ holder_ = receiver;
+ holder_map_ =
+ Map::TransitionToAccessorProperty(handle(receiver->map(), isolate_),
+ name_, component, accessor, attributes);
JSObject::MigrateToMap(receiver, holder_map_);
- // Reload the information.
- state_ = NOT_FOUND;
- configuration_ = CHECK_OWN_REAL;
- state_ = LookupInHolder(*holder_map_);
- DCHECK(IsFound());
- HasProperty();
+ ReloadPropertyInformation();
+
+ if (!holder_map_->is_dictionary_map()) return;
+
+ // We have to deoptimize since accesses to data properties may have been
+ // inlined without a corresponding map-check.
+ if (holder_map_->IsGlobalObjectMap()) {
+ Deoptimizer::DeoptimizeGlobalObject(*receiver);
+ }
+
+ // Install the accessor into the dictionary-mode object.
+ PropertyDetails details(attributes, CALLBACKS, 0);
+ Handle<AccessorPair> pair;
+ if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
+ pair = Handle<AccessorPair>::cast(GetAccessors());
+ // If the component and attributes are identical, nothing has to be done.
+ if (pair->get(component) == *accessor) {
+ if (property_details().attributes() == attributes) return;
+ } else {
+ pair = AccessorPair::Copy(pair);
+ pair->set(component, *accessor);
+ }
+ } else {
+ pair = isolate()->factory()->NewAccessorPair();
+ pair->set(component, *accessor);
+ }
+ JSObject::SetNormalizedProperty(receiver, name_, pair, details);
+
+ JSObject::ReoptimizeIfPrototype(receiver);
+ holder_map_ = handle(receiver->map(), isolate_);
+ ReloadPropertyInformation();
}
bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
+ // Optimization that only works if configuration_ is not mutable.
+ if (!check_prototype_chain()) return true;
DisallowHeapAllocation no_gc;
- Handle<Object> receiver = GetReceiver();
- if (!receiver->IsJSReceiver()) return false;
- Object* current = *receiver;
- JSReceiver* holder = *maybe_holder_.ToHandleChecked();
+ if (!receiver_->IsJSReceiver()) return false;
+ Object* current = *receiver_;
+ JSReceiver* holder = *holder_;
// JSProxy do not occur as hidden prototypes.
if (current->IsJSProxy()) {
return JSReceiver::cast(current) == holder;
@@ -185,21 +223,17 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
Handle<Object> LookupIterator::FetchValue() const {
Object* result = NULL;
Handle<JSObject> holder = GetHolder<JSObject>();
- switch (property_encoding_) {
- case DICTIONARY:
- result = holder->property_dictionary()->ValueAt(number_);
- if (holder->IsGlobalObject()) {
- result = PropertyCell::cast(result)->value();
- }
- break;
- case DESCRIPTOR:
- if (property_details_.type() == v8::internal::FIELD) {
- FieldIndex field_index =
- FieldIndex::ForDescriptor(*holder_map_, number_);
- return JSObject::FastPropertyAt(
- holder, property_details_.representation(), field_index);
- }
- result = holder_map_->instance_descriptors()->GetValue(number_);
+ if (holder_map_->is_dictionary_map()) {
+ result = holder->property_dictionary()->ValueAt(number_);
+ if (holder_map_->IsGlobalObjectMap()) {
+ result = PropertyCell::cast(result)->value();
+ }
+ } else if (property_details_.type() == v8::internal::FIELD) {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
+ return JSObject::FastPropertyAt(holder, property_details_.representation(),
+ field_index);
+ } else {
+ result = holder_map_->instance_descriptors()->GetValue(number_);
}
return handle(result, isolate_);
}
@@ -207,7 +241,7 @@ Handle<Object> LookupIterator::FetchValue() const {
int LookupIterator::GetConstantIndex() const {
DCHECK(has_property_);
- DCHECK_EQ(DESCRIPTOR, property_encoding_);
+ DCHECK(!holder_map_->is_dictionary_map());
DCHECK_EQ(v8::internal::CONSTANT, property_details_.type());
return descriptor_number();
}
@@ -215,12 +249,22 @@ int LookupIterator::GetConstantIndex() const {
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
- DCHECK_EQ(DESCRIPTOR, property_encoding_);
+ DCHECK(!holder_map_->is_dictionary_map());
DCHECK_EQ(v8::internal::FIELD, property_details_.type());
int index =
- holder_map()->instance_descriptors()->GetFieldIndex(descriptor_number());
+ holder_map_->instance_descriptors()->GetFieldIndex(descriptor_number());
bool is_double = representation().IsDouble();
- return FieldIndex::ForPropertyIndex(*holder_map(), index, is_double);
+ return FieldIndex::ForPropertyIndex(*holder_map_, index, is_double);
+}
+
+
+Handle<HeapType> LookupIterator::GetFieldType() const {
+ DCHECK(has_property_);
+ DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK_EQ(v8::internal::FIELD, property_details_.type());
+ return handle(
+ holder_map_->instance_descriptors()->GetFieldType(descriptor_number()),
+ isolate_);
}
@@ -233,25 +277,22 @@ Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
Handle<Object> LookupIterator::GetAccessors() const {
- DCHECK(has_property_);
- DCHECK_EQ(ACCESSOR, property_kind_);
+ DCHECK_EQ(ACCESSOR, state_);
return FetchValue();
}
Handle<Object> LookupIterator::GetDataValue() const {
- DCHECK(has_property_);
- DCHECK_EQ(DATA, property_kind_);
+ DCHECK_EQ(DATA, state_);
Handle<Object> value = FetchValue();
return value;
}
void LookupIterator::WriteDataValue(Handle<Object> value) {
- DCHECK(is_guaranteed_to_have_holder());
- DCHECK(has_property_);
+ DCHECK_EQ(DATA, state_);
Handle<JSObject> holder = GetHolder<JSObject>();
- if (property_encoding_ == DICTIONARY) {
+ if (holder_map_->is_dictionary_map()) {
NameDictionary* property_dictionary = holder->property_dictionary();
if (holder->IsGlobalObject()) {
Handle<PropertyCell> cell(
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 2d609c5f66..14ca010d31 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -12,70 +12,61 @@
namespace v8 {
namespace internal {
-class LookupIterator V8_FINAL BASE_EMBEDDED {
+class LookupIterator FINAL BASE_EMBEDDED {
public:
enum Configuration {
- CHECK_OWN_REAL = 0,
- CHECK_HIDDEN = 1 << 0,
- CHECK_DERIVED = 1 << 1,
- CHECK_INTERCEPTOR = 1 << 2,
- CHECK_ACCESS_CHECK = 1 << 3,
- CHECK_ALL = CHECK_HIDDEN | CHECK_DERIVED |
- CHECK_INTERCEPTOR | CHECK_ACCESS_CHECK,
- SKIP_INTERCEPTOR = CHECK_ALL ^ CHECK_INTERCEPTOR,
- CHECK_OWN = CHECK_ALL ^ CHECK_DERIVED
+ // Configuration bits.
+ kHidden = 1 << 0,
+ kInterceptor = 1 << 1,
+ kPrototypeChain = 1 << 2,
+
+ // Convience combinations of bits.
+ OWN_SKIP_INTERCEPTOR = 0,
+ OWN = kInterceptor,
+ HIDDEN_SKIP_INTERCEPTOR = kHidden,
+ HIDDEN = kHidden | kInterceptor,
+ PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kHidden | kPrototypeChain,
+ PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor
};
enum State {
- NOT_FOUND,
- PROPERTY,
- INTERCEPTOR,
ACCESS_CHECK,
- JSPROXY
- };
-
- enum PropertyKind {
+ INTERCEPTOR,
+ JSPROXY,
+ NOT_FOUND,
+ ACCESSOR,
DATA,
- ACCESSOR
- };
-
- enum PropertyEncoding {
- DICTIONARY,
- DESCRIPTOR
+ TRANSITION,
+ // Set state_ to BEFORE_PROPERTY to ensure that the next lookup will be a
+ // PROPERTY lookup.
+ BEFORE_PROPERTY = INTERCEPTOR
};
- LookupIterator(Handle<Object> receiver,
- Handle<Name> name,
- Configuration configuration = CHECK_ALL)
+ LookupIterator(Handle<Object> receiver, Handle<Name> name,
+ Configuration configuration = PROTOTYPE_CHAIN)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- property_kind_(DATA),
- property_encoding_(DESCRIPTOR),
- property_details_(NONE, NONEXISTENT, Representation::None()),
+ property_details_(NONE, NORMAL, Representation::None()),
isolate_(name->GetIsolate()),
name_(name),
- maybe_receiver_(receiver),
+ receiver_(receiver),
number_(DescriptorArray::kNotFound) {
- Handle<JSReceiver> root = GetRoot();
- holder_map_ = handle(root->map());
- maybe_holder_ = root;
+ holder_ = GetRoot();
+ holder_map_ = handle(holder_->map(), isolate_);
Next();
}
- LookupIterator(Handle<Object> receiver,
- Handle<Name> name,
+ LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
- Configuration configuration = CHECK_ALL)
+ Configuration configuration = PROTOTYPE_CHAIN)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- property_kind_(DATA),
- property_encoding_(DESCRIPTOR),
- property_details_(NONE, NONEXISTENT, Representation::None()),
+ property_details_(NONE, NORMAL, Representation::None()),
isolate_(name->GetIsolate()),
name_(name),
- holder_map_(holder->map()),
- maybe_receiver_(receiver),
- maybe_holder_(holder),
+ holder_map_(holder->map(), isolate_),
+ receiver_(receiver),
+ holder_(holder),
number_(DescriptorArray::kNotFound) {
Next();
}
@@ -86,60 +77,61 @@ class LookupIterator V8_FINAL BASE_EMBEDDED {
bool IsFound() const { return state_ != NOT_FOUND; }
void Next();
+ void NotFound() {
+ has_property_ = false;
+ state_ = NOT_FOUND;
+ }
- Heap* heap() const { return isolate_->heap(); }
Factory* factory() const { return isolate_->factory(); }
- Handle<Object> GetReceiver() const {
- return Handle<Object>::cast(maybe_receiver_.ToHandleChecked());
+ Handle<Object> GetReceiver() const { return receiver_; }
+ Handle<JSObject> GetStoreTarget() const;
+ bool is_dictionary_holder() const { return holder_map_->is_dictionary_map(); }
+ Handle<Map> transition_map() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return transition_map_;
}
- Handle<Map> holder_map() const { return holder_map_; }
template <class T>
Handle<T> GetHolder() const {
DCHECK(IsFound());
- return Handle<T>::cast(maybe_holder_.ToHandleChecked());
+ return Handle<T>::cast(holder_);
}
Handle<JSReceiver> GetRoot() const;
bool HolderIsReceiverOrHiddenPrototype() const;
- /* Dynamically reduce the trapped types. */
- void skip_interceptor() {
- configuration_ = static_cast<Configuration>(
- configuration_ & ~CHECK_INTERCEPTOR);
- }
- void skip_access_check() {
- configuration_ = static_cast<Configuration>(
- configuration_ & ~CHECK_ACCESS_CHECK);
- }
-
/* ACCESS_CHECK */
bool HasAccess(v8::AccessType access_type) const;
/* PROPERTY */
- // HasProperty needs to be called before any of the other PROPERTY methods
- // below can be used. It ensures that we are able to provide a definite
- // answer, and loads extra information about the property.
- bool HasProperty();
void PrepareForDataProperty(Handle<Object> value);
- void TransitionToDataProperty(Handle<Object> value,
- PropertyAttributes attributes,
- Object::StoreFromKeyed store_mode);
- PropertyKind property_kind() const {
- DCHECK(has_property_);
- return property_kind_;
- }
- PropertyEncoding property_encoding() const {
- DCHECK(has_property_);
- return property_encoding_;
- }
+ void PrepareTransitionToDataProperty(Handle<Object> value,
+ PropertyAttributes attributes,
+ Object::StoreFromKeyed store_mode);
+ bool IsCacheableTransition() {
+ bool cacheable =
+ state_ == TRANSITION && transition_map()->GetBackPointer()->IsMap();
+ if (cacheable) {
+ property_details_ = transition_map_->GetLastDescriptorDetails();
+ has_property_ = true;
+ }
+ return cacheable;
+ }
+ void ApplyTransitionToDataProperty();
+ void ReconfigureDataProperty(Handle<Object> value,
+ PropertyAttributes attributes);
+ void TransitionToAccessorProperty(AccessorComponent component,
+ Handle<Object> accessor,
+ PropertyAttributes attributes);
PropertyDetails property_details() const {
DCHECK(has_property_);
return property_details_;
}
- bool IsConfigurable() const { return !property_details().IsDontDelete(); }
+ bool IsConfigurable() const { return property_details().IsConfigurable(); }
+ bool IsReadOnly() const { return property_details().IsReadOnly(); }
Representation representation() const {
return property_details().representation();
}
FieldIndex GetFieldIndex() const;
+ Handle<HeapType> GetFieldType() const;
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
Handle<Object> GetAccessors() const;
@@ -152,61 +144,51 @@ class LookupIterator V8_FINAL BASE_EMBEDDED {
Handle<Map> GetReceiverMap() const;
MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
- inline State LookupInHolder(Map* map);
+ inline State LookupInHolder(Map* map, JSReceiver* holder);
Handle<Object> FetchValue() const;
+ void ReloadPropertyInformation();
bool IsBootstrapping() const;
- // Methods that fetch data from the holder ensure they always have a holder.
- // This means the receiver needs to be present as opposed to just the receiver
- // map. Other objects in the prototype chain are transitively guaranteed to be
- // present via the receiver map.
- bool is_guaranteed_to_have_holder() const {
- return !maybe_receiver_.is_null();
- }
+ bool check_hidden() const { return (configuration_ & kHidden) != 0; }
bool check_interceptor() const {
- return !IsBootstrapping() && (configuration_ & CHECK_INTERCEPTOR) != 0;
- }
- bool check_derived() const {
- return (configuration_ & CHECK_DERIVED) != 0;
- }
- bool check_hidden() const {
- return (configuration_ & CHECK_HIDDEN) != 0;
+ return !IsBootstrapping() && (configuration_ & kInterceptor) != 0;
}
- bool check_access_check() const {
- return (configuration_ & CHECK_ACCESS_CHECK) != 0;
+ bool check_prototype_chain() const {
+ return (configuration_ & kPrototypeChain) != 0;
}
int descriptor_number() const {
DCHECK(has_property_);
- DCHECK_EQ(DESCRIPTOR, property_encoding_);
+ DCHECK(!holder_map_->is_dictionary_map());
return number_;
}
int dictionary_entry() const {
DCHECK(has_property_);
- DCHECK_EQ(DICTIONARY, property_encoding_);
+ DCHECK(holder_map_->is_dictionary_map());
return number_;
}
static Configuration ComputeConfiguration(
Configuration configuration, Handle<Name> name) {
if (name->IsOwn()) {
- return static_cast<Configuration>(configuration & CHECK_OWN);
+ return static_cast<Configuration>(configuration & HIDDEN);
} else {
return configuration;
}
}
+ // If configuration_ becomes mutable, update
+ // HolderIsReceiverOrHiddenPrototype.
Configuration configuration_;
State state_;
bool has_property_;
- PropertyKind property_kind_;
- PropertyEncoding property_encoding_;
PropertyDetails property_details_;
Isolate* isolate_;
Handle<Name> name_;
Handle<Map> holder_map_;
- MaybeHandle<Object> maybe_receiver_;
- MaybeHandle<JSReceiver> maybe_holder_;
+ Handle<Map> transition_map_;
+ Handle<Object> receiver_;
+ Handle<JSReceiver> holder_;
int number_;
};
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 131df878b5..b3ff0fcf11 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -166,14 +166,16 @@ macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
+macro HAS_OWN_PROPERTY(obj, index) = (%_CallFunction(obj, index, ObjectHasOwnProperty));
# Private names.
# GET_PRIVATE should only be used if the property is known to exists on obj
# itself (it should really use %GetOwnProperty, but that would be way slower).
-macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
-macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
+macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateOwnSymbol(name));
+macro NEW_PRIVATE_OWN(name) = (%CreatePrivateOwnSymbol(name));
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
+macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]);
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 13cdb31cdc..f06249d252 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -173,8 +173,8 @@ function MathSign(x) {
x = TO_NUMBER_INLINE(x);
if (x > 0) return 1;
if (x < 0) return -1;
- if (x === 0) return x;
- return NAN;
+ // -0, 0 or NaN.
+ return x;
}
// ES6 draft 09-27-13, section 20.2.2.34.
@@ -182,23 +182,8 @@ function MathTrunc(x) {
x = TO_NUMBER_INLINE(x);
if (x > 0) return MathFloor(x);
if (x < 0) return MathCeil(x);
- if (x === 0) return x;
- return NAN;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.30.
-function MathSinh(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- // Idempotent for NaN, +/-0 and +/-Infinity.
- if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
- return (MathExp(x) - MathExp(-x)) / 2;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.12.
-function MathCosh(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
- return (MathExp(x) + MathExp(-x)) / 2;
+ // -0, 0 or NaN.
+ return x;
}
// ES6 draft 09-27-13, section 20.2.2.33.
@@ -327,26 +312,6 @@ function CubeRoot(x) {
return NEWTON_ITERATION_CBRT(x, approx);
}
-// ES6 draft 09-27-13, section 20.2.2.14.
-// Use Taylor series to approximate.
-// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ...
-// == x/1! + x^2/2! + x^3/3! + ...
-// The closer x is to 0, the fewer terms are required.
-function MathExpm1(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- var xabs = MathAbs(x);
- if (xabs < 2E-7) {
- return x * (1 + x * (1/2));
- } else if (xabs < 6E-5) {
- return x * (1 + x * (1/2 + x * (1/6)));
- } else if (xabs < 2E-2) {
- return x * (1 + x * (1/2 + x * (1/6 +
- x * (1/24 + x * (1/120 + x * (1/720))))));
- } else { // Use regular exp if not close enough to 0.
- return MathExp(x) - 1;
- }
-}
-
// -------------------------------------------------------------------
function SetUpMath() {
@@ -396,8 +361,8 @@ function SetUpMath() {
"imul", MathImul,
"sign", MathSign,
"trunc", MathTrunc,
- "sinh", MathSinh,
- "cosh", MathCosh,
+ "sinh", MathSinh, // implemented by third_party/fdlibm
+ "cosh", MathCosh, // implemented by third_party/fdlibm
"tanh", MathTanh,
"asinh", MathAsinh,
"acosh", MathAcosh,
@@ -408,8 +373,8 @@ function SetUpMath() {
"fround", MathFroundJS,
"clz32", MathClz32,
"cbrt", MathCbrt,
- "log1p", MathLog1p, // implemented by third_party/fdlibm
- "expm1", MathExpm1
+ "log1p", MathLog1p, // implemented by third_party/fdlibm
+ "expm1", MathExpm1 // implemented by third_party/fdlibm
));
%SetInlineBuiltinFlag(MathCeil);
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 865bdca8fa..290f756770 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -130,7 +130,7 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
Handle<Object> data) {
Factory* factory = isolate->factory();
Handle<String> fmt_str =
- factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage"));
+ factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("FormatMessage"));
Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
isolate->js_builtins_object(), fmt_str).ToHandleChecked());
Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
@@ -138,10 +138,10 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
Handle<Object>(message->arguments(), isolate) };
MaybeHandle<Object> maybe_result = Execution::TryCall(
- fun, isolate->js_builtins_object(), ARRAY_SIZE(argv), argv);
+ fun, isolate->js_builtins_object(), arraysize(argv), argv);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
- return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>"));
+ return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
}
Handle<String> result_string = Handle<String>::cast(result);
// A string that has been obtained from JS code in this way is
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index eba1e16ece..4a71a61f5c 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -8,6 +8,7 @@ var kMessages = {
// Error
cyclic_proto: ["Cyclic __proto__ value"],
code_gen_from_strings: ["%0"],
+ constructor_special_method: ["Class constructor may not be an accessor"],
generator_running: ["Generator is already running"],
generator_finished: ["Generator has already finished"],
// TypeError
@@ -37,6 +38,8 @@ var kMessages = {
cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
not_constructor: ["%0", " is not a constructor"],
not_defined: ["%0", " is not defined"],
+ non_method: ["'super' is referenced from non-method"],
+ unsupported_super: ["Unsupported reference to 'super'"],
non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
with_expression: ["%0", " has no properties"],
@@ -44,6 +47,8 @@ var kMessages = {
no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
+ toMethod_non_function: ["Function.prototype.toMethod was called on ", "%0", ", which is a ", "%1", " and not a function"],
+ toMethod_non_object: ["Function.prototype.toMethod: home object ", "%0", " is not an object"],
invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
@@ -137,6 +142,7 @@ var kMessages = {
array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
illegal_access: ["Illegal access"],
+ static_prototype: ["Classes may not have static property named prototype"],
strict_mode_with: ["Strict mode code may not include a with statement"],
strict_eval_arguments: ["Unexpected eval or arguments in strict mode"],
too_many_arguments: ["Too many arguments in function call (only 65535 allowed)"],
@@ -167,7 +173,8 @@ var kMessages = {
symbol_to_number: ["Cannot convert a Symbol value to a number"],
invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
module_type_error: ["Module '", "%0", "' used improperly"],
- module_export_undefined: ["Export '", "%0", "' is not defined in module"]
+ module_export_undefined: ["Export '", "%0", "' is not defined in module"],
+ unexpected_super: ["'super' keyword unexpected here"]
};
@@ -750,10 +757,10 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
-var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
-var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
-var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
+var CallSiteReceiverKey = NEW_PRIVATE_OWN("CallSite#receiver");
+var CallSiteFunctionKey = NEW_PRIVATE_OWN("CallSite#function");
+var CallSitePositionKey = NEW_PRIVATE_OWN("CallSite#position");
+var CallSiteStrictModeKey = NEW_PRIVATE_OWN("CallSite#strict_mode");
function CallSite(receiver, fun, pos, strict_mode) {
SET_PRIVATE(this, CallSiteReceiverKey, receiver);
@@ -1108,26 +1115,33 @@ function GetTypeName(receiver, requireConstructor) {
var stack_trace_symbol; // Set during bootstrapping.
-var formatted_stack_trace_symbol = NEW_PRIVATE("formatted stack trace");
+var formatted_stack_trace_symbol = NEW_PRIVATE_OWN("formatted stack trace");
// Format the stack trace if not yet done, and return it.
// Cache the formatted stack trace on the holder.
var StackTraceGetter = function() {
- var formatted_stack_trace = GET_PRIVATE(this, formatted_stack_trace_symbol);
- if (IS_UNDEFINED(formatted_stack_trace)) {
- var holder = this;
- while (!HAS_PRIVATE(holder, stack_trace_symbol)) {
- holder = %GetPrototype(holder);
- if (!holder) return UNDEFINED;
+ var formatted_stack_trace = UNDEFINED;
+ var holder = this;
+ while (holder) {
+ var formatted_stack_trace =
+ GET_PRIVATE(holder, formatted_stack_trace_symbol);
+ if (IS_UNDEFINED(formatted_stack_trace)) {
+ // No formatted stack trace available.
+ var stack_trace = GET_PRIVATE(holder, stack_trace_symbol);
+ if (IS_UNDEFINED(stack_trace)) {
+ // Neither formatted nor structured stack trace available.
+ // Look further up the prototype chain.
+ holder = %GetPrototype(holder);
+ continue;
+ }
+ formatted_stack_trace = FormatStackTrace(holder, stack_trace);
+ SET_PRIVATE(holder, stack_trace_symbol, UNDEFINED);
+ SET_PRIVATE(holder, formatted_stack_trace_symbol, formatted_stack_trace);
}
- var stack_trace = GET_PRIVATE(holder, stack_trace_symbol);
- if (IS_UNDEFINED(stack_trace)) return UNDEFINED;
- formatted_stack_trace = FormatStackTrace(holder, stack_trace);
- SET_PRIVATE(holder, stack_trace_symbol, UNDEFINED);
- SET_PRIVATE(holder, formatted_stack_trace_symbol, formatted_stack_trace);
+ return formatted_stack_trace;
}
- return formatted_stack_trace;
+ return UNDEFINED;
};
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 8d6807d267..5508ba626f 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,10 +1,5 @@
-plind44@gmail.com
paul.lind@imgtec.com
-gergely@homejinni.com
gergely.kis@imgtec.com
-palfia@homejinni.com
akos.palfi@imgtec.com
-kilvadyb@homejinni.com
balazs.kilvady@imgtec.com
-Dusan.Milosavljevic@rt-rk.com
dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 2666f6ada7..1cd9361e9a 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -99,6 +99,11 @@ int DoubleRegister::NumAllocatableRegisters() {
}
+int DoubleRegister::NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+}
+
+
int FPURegister::ToAllocationIndex(FPURegister reg) {
DCHECK(reg.code() % 2 == 0);
DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 936a73b5f9..f1e5dfb670 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -37,6 +37,7 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/serialize.h"
@@ -98,10 +99,32 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+ // FP64 mode is implied on r6.
+ supported_ |= 1u << FP64FPU;
+#endif
+#if defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64FPU;
+#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(FPU_MODE_FPXX)
+ if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
+#elif defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64FPU;
+#endif
+#if defined(_MIPS_ARCH_MIPS32RX)
+ if (cpu.architecture() == 6) {
+ supported_ |= 1u << MIPSr6;
+ } else if (cpu.architecture() == 2) {
+ supported_ |= 1u << MIPSr1;
+ supported_ |= 1u << MIPSr2;
+ } else {
+ supported_ |= 1u << MIPSr1;
+ }
+#endif
#endif
}
@@ -317,7 +340,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -484,7 +507,9 @@ bool Assembler::IsBranch(Instr instr) {
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1EQZ) ||
+ (opcode == COP1 && rs_field == BC1NEZ);
}
@@ -529,12 +554,18 @@ bool Assembler::IsJal(Instr instr) {
bool Assembler::IsJr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ } else {
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
+ }
}
bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}
@@ -1019,6 +1050,88 @@ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
+int32_t Assembler::branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(is_int16(offset >> 2));
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ DCHECK((offset & 3) == 0);
+ DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(((offset >> 2) & 0xFFe00000) == 0); // Offset is 21bit width.
+
+ return offset;
+}
+
+
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
@@ -1072,7 +1185,33 @@ void Assembler::bgez(Register rs, int16_t offset) {
}
+void Assembler::bgezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZL, rs, rt, offset);
+}
+
+
void Assembler::bgezal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
@@ -1087,6 +1226,13 @@ void Assembler::bgtz(Register rs, int16_t offset) {
}
+void Assembler::bgtzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
@@ -1094,6 +1240,38 @@ void Assembler::blez(Register rs, int16_t offset) {
}
+void Assembler::blezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
@@ -1102,6 +1280,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
@@ -1116,6 +1295,101 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
}
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+void Assembler::bgezall(Register rs, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::beqzc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+ emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnezc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+ emit(instr);
+}
+
+
void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
@@ -1129,12 +1403,16 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
+ if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+ } else {
+ jalr(rs, zero_reg);
}
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1205,7 +1483,41 @@ void Assembler::subu(Register rd, Register rs, Register rt) {
void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ } else {
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+ }
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muh(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
@@ -1224,11 +1536,23 @@ void Assembler::div(Register rs, Register rt) {
}
+void Assembler::div(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
+void Assembler::divu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
@@ -1311,7 +1635,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1321,7 +1645,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1445,6 +1769,14 @@ void Assembler::lui(Register rd, int32_t j) {
}
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+ // This instruction uses same opcode as 'lui'. The difference in encoding is
+ // 'lui' has zero reg. for rs field.
+ DCHECK(is_uint16(j));
+ GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -1588,15 +1920,19 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+ }
}
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1604,13 +1940,13 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
void Assembler::pref(int32_t hint, const MemOperand& rs) {
- DCHECK(kArchVariant != kLoongson);
+ DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_);
@@ -1629,12 +1965,20 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ GenInstrImmediate(LW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ mthc1(at, fd);
+ } else {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
@@ -1646,12 +1990,20 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ mfhc1(at, fd);
+ GenInstrImmediate(SW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ } else {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
@@ -1660,11 +2012,21 @@ void Assembler::mtc1(Register rt, FPURegister fs) {
}
+void Assembler::mthc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
@@ -1785,25 +2147,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -1838,13 +2200,45 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -1860,7 +2254,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -1870,7 +2264,32 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
}
-// Conditions.
+// Conditions for >= MIPSr6.
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ emit(instr);
+}
+
+
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
@@ -2184,7 +2603,7 @@ void Assembler::set_target_address_at(Address pc,
// lui rt, upper-16.
// ori rt rt, lower-16.
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
// The following code is an optimization for the common case of Call()
// or Jump() which is load to register, and jump through register:
@@ -2227,20 +2646,20 @@ void Assembler::set_target_address_at(Address pc,
if (IsJalr(instr3)) {
// Try to convert JALR to JAL.
if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
patched_jump = true;
}
} else if (IsJr(instr3)) {
// Try to convert JR to J, skip returns (jr ra).
bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
patched_jump = true;
}
} else if (IsJal(instr3)) {
if (in_range) {
// We are patching an already converted JAL.
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
} else {
// Patch JAL, but out of range, revert to JALR.
// JALR rs reg is the rt reg specified in the ORI instruction.
@@ -2252,12 +2671,16 @@ void Assembler::set_target_address_at(Address pc,
} else if (IsJ(instr3)) {
if (in_range) {
// We are patching an already converted J (jump).
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
} else {
// Trying patch J, but out of range, just go back to JR.
// JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
}
patched_jump = true;
}
@@ -2285,19 +2708,23 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
uint32_t rs_field = GetRt(instr2) << kRsShift;
uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+ *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
patched = true;
} else if (IsJ(instr3)) {
DCHECK(GetOpcodeField(instr1) == LUI);
DCHECK(GetOpcodeField(instr2) == ORI);
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
patched = true;
}
if (patched) {
- CpuFeatures::FlushICache(pc+2, sizeof(Address));
+ CpuFeatures::FlushICache(pc + 2, sizeof(Address));
}
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 8469c1ca18..c6b12b76d7 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -222,6 +222,10 @@ struct FPURegister {
inline static int NumRegisters();
inline static int NumAllocatableRegisters();
+
+ // TODO(turbofan): Proper support for float32.
+ inline static int NumAllocatableAliasedRegisters();
+
inline static int ToAllocationIndex(FPURegister reg);
static const char* AllocationIndexToString(int index);
@@ -328,6 +332,8 @@ const FPURegister f31 = { 31 };
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
+// Used on mips32r6 for compare operations.
+#define kDoubleCompareReg f31
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -465,11 +471,20 @@ class Assembler : public AssemblerBase {
// position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed);
DCHECK((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ int32_t shifted_branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t o = branch_offset_compact(L, jump_elimination_allowed);
+ DCHECK((o & 3) == 0); // Assert the offset is aligned.
+ return o >> 2;
+ }
uint32_t jump_address(Label* L);
// Puts a labels target address at the given position.
@@ -627,15 +642,99 @@ class Assembler : public AssemblerBase {
beq(rs, rt, branch_offset(L, false) >> 2);
}
void bgez(Register rs, int16_t offset);
+ void bgezc(Register rt, int16_t offset);
+ void bgezc(Register rt, Label* L) {
+ bgezc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgeuc(Register rs, Register rt, int16_t offset);
+ void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgec(Register rs, Register rt, int16_t offset);
+ void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ }
void bgezal(Register rs, int16_t offset);
+ void bgezalc(Register rt, int16_t offset);
+ void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgezall(Register rs, int16_t offset);
+ void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L, false)>>2);
+ }
void bgtz(Register rs, int16_t offset);
+ void bgtzc(Register rt, int16_t offset);
+ void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, branch_offset_compact(L, false)>>2);
+ }
void blez(Register rs, int16_t offset);
+ void blezc(Register rt, int16_t offset);
+ void blezc(Register rt, Label* L) {
+ blezc(rt, branch_offset_compact(L, false)>>2);
+ }
void bltz(Register rs, int16_t offset);
+ void bltzc(Register rt, int16_t offset);
+ void bltzc(Register rt, Label* L) {
+ bltzc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltuc(Register rs, Register rt, int16_t offset);
+ void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltc(Register rs, Register rt, int16_t offset);
+ void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
void bltzal(Register rs, int16_t offset);
+ void blezalc(Register rt, int16_t offset);
+ void blezalc(Register rt, Label* L) {
+ blezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltzalc(Register rt, int16_t offset);
+ void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgtzalc(Register rt, int16_t offset);
+ void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqzalc(Register rt, int16_t offset);
+ void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqc(Register rs, Register rt, int16_t offset);
+ void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqzc(Register rs, int32_t offset);
+ void beqzc(Register rs, Label* L) {
+ beqzc(rs, branch_offset21_compact(L, false)>>2);
+ }
+ void bnezalc(Register rt, int16_t offset);
+ void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnec(Register rs, Register rt, int16_t offset);
+ void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnezc(Register rt, int32_t offset);
+ void bnezc(Register rt, Label* L) {
+ bnezc(rt, branch_offset21_compact(L, false)>>2);
+ }
void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2);
}
+ void bovc(Register rs, Register rt, int16_t offset);
+ void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnvc(Register rs, Register rt, int16_t offset);
+ void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version.
@@ -658,7 +757,14 @@ class Assembler : public AssemblerBase {
void multu(Register rs, Register rt);
void div(Register rs, Register rt);
void divu(Register rs, Register rt);
+ void div(Register rd, Register rs, Register rt);
+ void divu(Register rd, Register rs, Register rt);
+ void mod(Register rd, Register rs, Register rt);
+ void modu(Register rd, Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
+ void muh(Register rd, Register rs, Register rt);
+ void mulu(Register rd, Register rs, Register rt);
+ void muhu(Register rd, Register rs, Register rt);
void addiu(Register rd, Register rs, int32_t j);
@@ -672,6 +778,7 @@ class Assembler : public AssemblerBase {
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
+ void aui(Register rs, Register rt, int32_t j);
// Shifts.
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
@@ -736,6 +843,15 @@ class Assembler : public AssemblerBase {
void movt(Register rd, Register rs, uint16_t cc = 0);
void movf(Register rd, Register rs, uint16_t cc = 0);
+ void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs, uint8_t sel);
+ void seleqz(Register rs, Register rt, Register rd);
+ void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs);
+ void selnez(Register rs, Register rt, Register rd);
+ void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs);
+
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -751,7 +867,10 @@ class Assembler : public AssemblerBase {
void sdc1(FPURegister fs, const MemOperand& dst);
void mtc1(Register rt, FPURegister fs);
+ void mthc1(Register rt, FPURegister fs);
+
void mfc1(Register rt, FPURegister fs);
+ void mfhc1(Register rt, FPURegister fs);
void ctc1(Register rt, FPUControlRegister fs);
void cfc1(Register rt, FPUControlRegister fs);
@@ -790,6 +909,11 @@ class Assembler : public AssemblerBase {
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs);
@@ -798,7 +922,20 @@ class Assembler : public AssemblerBase {
void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs);
- // Conditions and branches.
+ // Conditions and branches for MIPSr6.
+ void cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister ft, FPURegister fs);
+
+ void bc1eqz(int16_t offset, FPURegister ft);
+ void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(branch_offset(L, false)>>2, ft);
+ }
+ void bc1nez(int16_t offset, FPURegister ft);
+ void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(branch_offset(L, false)>>2, ft);
+ }
+
+ // Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 462ef675c9..df6dc534be 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -12,8 +12,8 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
+
namespace v8 {
namespace internal {
@@ -45,11 +45,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToExternalReference expects s0 to contain the number of arguments
+ // JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
- __ Addu(s0, a0, num_extra_args + 1);
- __ sll(s1, s0, kPointerSizeLog2);
- __ Subu(s1, s1, kPointerSize);
+ __ Addu(a0, a0, num_extra_args + 1);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -824,8 +822,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 2e8e6074b5..f2fdab6896 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -6,328 +6,86 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a1 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a3, a2, a1 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a3, a2, a1, a0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2, a3 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- UNIMPLEMENTED();
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2, a1, a0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0, a1 };
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(entry));
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return cp; }
-
-
static void InitializeArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // cp -- context
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- allocation site with elements kind
Address deopt_handler = Runtime::FunctionForId(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, a1, a2 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, a1, a2, a0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // cp -- context
- // a0 -- number of arguments
- // a1 -- constructor function
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, a1 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, a1, a0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a1, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2, a1, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a1, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { cp, // context,
- a1, // JSFunction
- a0, // actual number of arguments
- a2, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { cp, // context
- a2, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { cp, // context
- a2, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { cp, // context
- a0, // receiver
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { cp, // context
- a0, // callee
- t0, // call_data
- a2, // holder
- a1, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
@@ -348,26 +106,25 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- a0.is(descriptor->GetEnvironmentParameterRegister(
- param_count - 1)));
+ a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments, adjust sp.
__ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sw(descriptor->GetEnvironmentParameterRegister(i),
- MemOperand(sp, (param_count-1-i) * kPointerSize));
+ __ sw(descriptor.GetEnvironmentParameterRegister(i),
+ MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -375,108 +132,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Isolate* isolate,
- Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : PlatformCodeStub(isolate),
- result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() const { return ConvertToDouble; }
- int MinorKey() const {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- Register exponent, mantissa;
- if (kArchEndian == kLittle) {
- exponent = result1_;
- mantissa = result2_;
- } else {
- exponent = result2_;
- mantissa = result1_;
- }
- Label not_special;
- // Convert from Smi to integer.
- __ sra(source_, source_, kSmiTagSize);
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ And(exponent, source_, Operand(HeapNumber::kSignMask));
- // Subtract from 0 if source was negative.
- __ subu(at, zero_reg, source_);
- __ Movn(source_, at, exponent);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, source_, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- // Safe to use 'at' as dest reg here.
- __ Or(at, exponent, Operand(exponent_word_for_1));
- __ Movn(exponent, at, source_); // Write exp when source not 0.
- // 1, 0 and -1 all have 0 for the second word.
- __ Ret(USE_DELAY_SLOT);
- __ mov(mantissa, zero_reg);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ Clz(zeros_, source_);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
- __ subu(mantissa, mantissa, zeros_);
- __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
- __ Or(exponent, exponent, mantissa);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ sllv(source_, source_, zeros_);
- // Compute lower part of fraction (last 12 bits).
- __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
- // And the top (top 20 bits).
- __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-
- __ Ret(USE_DELAY_SLOT);
- __ or_(exponent, exponent, source_);
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@@ -633,32 +288,32 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// We test for the special value that has a different exponent.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
// Test sign, and save for later conditionals.
- __ And(sign_, the_int_, Operand(0x80000000u));
- __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+ __ And(sign(), the_int(), Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(non_smi_exponent));
+ __ li(scratch(), Operand(non_smi_exponent));
// Set the sign bit in scratch_ if the value was negative.
- __ or_(scratch_, scratch_, sign_);
+ __ or_(scratch(), scratch(), sign());
// Subtract from 0 if the value was negative.
- __ subu(at, zero_reg, the_int_);
- __ Movn(the_int_, at, sign_);
+ __ subu(at, zero_reg, the_int());
+ __ Movn(the_int(), at, sign());
// We should be masking the implict first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ srl(at, the_int_, shift_distance);
- __ or_(scratch_, scratch_, at);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ __ srl(at, the_int(), shift_distance);
+ __ or_(scratch(), scratch(), at);
+ __ sw(scratch(), FieldMemOperand(the_heap_number(),
HeapNumber::kExponentOffset));
- __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ sll(scratch(), the_int(), 32 - shift_distance);
__ Ret(USE_DELAY_SLOT);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ __ sw(scratch(), FieldMemOperand(the_heap_number(),
HeapNumber::kMantissaOffset));
__ bind(&max_negative_int);
@@ -667,13 +322,13 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// The actual mantissa bits stored are all 0 because the implicit most
// significant 1 bit is not stored.
non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(scratch_, zero_reg);
+ __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+ __ mov(scratch(), zero_reg);
__ Ret(USE_DELAY_SLOT);
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ sw(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
}
@@ -941,15 +596,14 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
Register scratch,
- CompareIC::State expected,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
DONT_DO_SMI_CHECK);
@@ -963,14 +617,14 @@ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
// On entry a1 and a2 are the values to be compared.
// On exit a0 is 0, positive or negative to indicate the result of
// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = a1;
Register rhs = a0;
Condition cc = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
@@ -1022,16 +676,28 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, t0);
+ // Use previous check to store conditionally to v0 oposite condition
+ // (GREATER). If rhs is equal to lhs, this will be corrected in next
+ // check.
+ __ Movf(v0, t1);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, t2);
+ } else {
+ Label skip;
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
+ __ mov(v0, t0); // Return LESS as result.
+
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
+ __ mov(v0, t2); // Return EQUAL as result.
+
+ __ mov(v0, t1); // Return GREATER as result.
+ __ bind(&skip);
+ }
__ Ret();
@@ -1080,29 +746,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
masm, lhs, rhs, &flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
a3);
if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- a2,
- a3,
- t0);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- a2,
- a3,
- t0,
- t1);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
+ t1);
}
// Never falls through to here.
@@ -1157,7 +813,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
@@ -1170,7 +826,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ MultiPopFPU(kCallerSavedFPU);
}
@@ -1181,7 +837,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
- const Register exponent = a2;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(a2));
const Register heapnumbermap = t1;
const Register heapnumber = v0;
const DoubleRegister double_base = f2;
@@ -1193,7 +850,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = t3;
Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
@@ -1221,7 +878,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -1229,7 +886,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
@@ -1242,7 +899,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
@@ -1311,7 +968,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
+ if (exponent_type() == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
@@ -1359,7 +1016,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Returning or bailing out.
Counters* counters = isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
@@ -1426,20 +1083,10 @@ void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(isolate, 1, mode);
- StoreBufferOverflowStub stub(isolate, mode);
- // These stubs might already be in the snapshot, detect that and don't
- // regenerate, which would lead to code stub initialization state being messed
- // up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
- save_doubles_code = *save_doubles.GetCode();
- }
- Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
- store_buffer_overflow_code = *stub.GetCode();
- }
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
@@ -1452,26 +1099,22 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
- // s0: number of arguments including receiver
- // s1: size of arguments excluding receiver
- // s2: pointer to builtin function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
- // The reason for this is that these arguments would need to be saved anyway
- // so it's faster to set them up directly.
- // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
// Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
__ Addu(s1, sp, s1);
+ __ Subu(s1, s1, kPointerSize);
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
+ __ EnterExitFrame(save_doubles());
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -1479,7 +1122,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Prepare arguments for C routine.
// a0 = argc
- __ mov(a0, s0);
+ __ mov(s0, a0);
+ __ mov(s2, a1);
// a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
@@ -1559,7 +1203,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer
// fp: frame pointer
// s0: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -1586,7 +1230,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
@@ -1620,7 +1264,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// We build an EntryFrame.
__ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
@@ -1711,7 +1355,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// 4 args slots
// args
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate);
__ li(t0, Operand(construct_entry));
@@ -1770,8 +1414,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = a0; // Object (lhs).
@@ -1796,7 +1438,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
__ Branch(&miss, ne, function, Operand(at));
@@ -1855,6 +1497,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ }
} else {
// Patch the call site to return true.
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -1873,6 +1518,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (!HasCallSiteInlineCheck()) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ }
} else {
// Patch the call site to return false.
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
@@ -1900,19 +1548,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ne,
scratch,
Operand(isolate()->factory()->null_value()));
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
// Slow-case. Tail call builtin.
@@ -1939,7 +1599,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
t0, &miss);
__ bind(&miss);
@@ -1948,17 +1608,13 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() { return a0; }
-
-
-Register InstanceofStub::right() { return a1; }
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smiGenerateReadElement.
Label slow;
@@ -2253,6 +1909,32 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in ra.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+ __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
@@ -2517,8 +2199,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kStringEncodingMask == 4);
STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
- __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
+ __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
@@ -2531,7 +2213,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(t9, &runtime);
// a1: previous index
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
// t9: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -2586,7 +2268,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
- // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ // calculate the shift of the index (0 for one-byte and 1 for two-byte).
__ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
// Load the length from the original subject string from the previous stack
@@ -2805,9 +2487,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into t0.
@@ -3005,7 +2687,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -3107,7 +2789,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -3126,7 +2808,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, a2);
@@ -3138,7 +2820,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Compute the receiver in sloppy mode.
__ lw(a3, MemOperand(sp, argc * kPointerSize));
@@ -3155,7 +2837,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(masm, argc, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -3183,7 +2865,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -3198,9 +2880,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Get the receiver of the function from the stack; 1 ~ return address.
- __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+ __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -3209,6 +2891,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ Push(t0, a1, a2, a3);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -3221,11 +2906,6 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
DCHECK(!t0.is(index_));
DCHECK(!t0.is(result_));
DCHECK(!t0.is(object_));
@@ -3329,7 +3009,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ And(t0,
code_,
Operand(kSmiTagMask |
@@ -3337,7 +3017,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one-byte char code.
STATIC_ASSERT(kSmiTag == 0);
__ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
__ Addu(result_, result_, t0);
@@ -3366,10 +3046,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
@@ -3413,57 +3090,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = seed + character + ((seed + character) << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ SmiUntag(hash);
- __ addu(hash, hash, character);
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ addu(hash, hash, character);
- // hash += hash << 10;
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ sll(at, hash, 3);
- __ addu(hash, hash, at);
- // hash ^= hash >> 11;
- __ srl(at, hash, 11);
- __ xor_(hash, hash, at);
- // hash += hash << 15;
- __ sll(at, hash, 15);
- __ addu(hash, hash, at);
-
- __ li(at, Operand(String::kHashBitMask));
- __ and_(hash, hash, at);
-
- // if (hash == 0) hash = 27;
- __ ori(at, zero_reg, StringHasher::kZeroHash);
- __ Movz(hash, at, hash);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
@@ -3584,7 +3210,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
- __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
+ __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
@@ -3628,7 +3254,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
// Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
+ __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
// Locate first character of substring to copy.
__ Addu(t1, t1, a3);
@@ -3687,12 +3313,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Register length = scratch1;
// Compare lengths.
@@ -3717,9 +3340,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3, v0,
- &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+ v0, &strings_not_equal);
// Characters are equal.
__ Ret(USE_DELAY_SLOT);
@@ -3727,13 +3349,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
@@ -3747,9 +3365,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4, v0,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, v0, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
@@ -3772,14 +3389,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Register scratch3,
Label* chars_not_equal) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
@@ -3827,13 +3439,13 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
// Compare flat ASCII strings natively. Remove arguments from stack first.
__ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
__ Addu(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
@@ -3863,13 +3475,13 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -3891,17 +3503,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(a1, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(a0, &miss);
}
@@ -3959,12 +3571,12 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
__ JumpIfSmi(a1, &unordered);
@@ -3974,7 +3586,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&unordered, eq, a1, Operand(at));
}
@@ -3984,8 +3596,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -4024,8 +3636,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
DCHECK(GetCondition() == eq);
Label miss;
@@ -4045,8 +3657,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss);
- __ JumpIfNotUniqueName(tmp2, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
// Use a0 as result
__ mov(v0, a0);
@@ -4068,11 +3680,11 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = a1;
@@ -4126,18 +3738,18 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&is_symbol);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+ tmp3);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
@@ -4154,8 +3766,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -4174,7 +3786,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
@@ -4191,7 +3803,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
@@ -4199,7 +3811,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ Push(ra, a1, a0);
- __ li(t0, Operand(Smi::FromInt(op_)));
+ __ li(t0, Operand(Smi::FromInt(op())));
__ addiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
__ sw(t0, MemOperand(sp)); // In the delay slot.
@@ -4300,7 +3912,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
@@ -4472,12 +4084,12 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Stop if found the property.
__ Branch(&in_dictionary, eq, entry_key, Operand(key));
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -4485,7 +4097,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
}
@@ -4529,11 +4141,11 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
__ nop();
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
}
__ Ret();
@@ -4555,7 +4167,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -4575,10 +4187,10 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -4593,7 +4205,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
@@ -4609,7 +4221,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4637,10 +4249,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -4681,10 +4293,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -4772,7 +4384,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ lw(a1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Addu(a1, a1, Operand(1));
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4782,6 +4394,20 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4820,7 +4446,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(s5, sp);
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
__ Subu(sp, sp, kCArgsSlotsSize);
@@ -4998,7 +4624,7 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
@@ -5010,11 +4636,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -5024,7 +4650,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- a0 : argc (only if argument_count_ == ANY)
+ // -- a0 : argc (only if argument_count() == ANY)
// -- a1 : constructor
// -- a2 : AllocationSite or undefined
// -- sp[0] : return address
@@ -5159,9 +4785,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = a1;
Register context = cp;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -5247,7 +4873,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- a2 : api_function_address
// -----------------------------------
- Register api_function_address = a2;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(a2));
__ mov(a0, sp); // a0 = Handle<Name>
__ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 0e3f1c3fa6..afad32b039 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -5,9 +5,6 @@
#ifndef V8_MIPS_CODE_STUBS_ARM_H_
#define V8_MIPS_CODE_STUBS_ARM_H_
-#include "src/ic-inl.h"
-
-
namespace v8 {
namespace internal {
@@ -15,24 +12,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
@@ -46,33 +25,25 @@ class StringHelper : public AllStatic {
Register scratch,
String::Encoding encoding);
+ // Compares two flat one-byte strings and returns result in v0.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ // Compares two flat one-byte strings for equality and returns result in v0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Register scratch3,
+ Label* chars_not_equal);
- void Generate(MacroAssembler* masm);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -82,61 +53,23 @@ class StoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return StoreRegistersState; }
- int MinorKey() const { return 0; }
- void Generate(MacroAssembler* masm);
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
+
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return RestoreRegistersState; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- // Compare two flat ASCII strings and returns result in v0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in v0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* chars_not_equal);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
@@ -145,29 +78,38 @@ class StringCompareStub: public PlatformCodeStub {
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Isolate* isolate,
- Register the_int,
- Register the_heap_number,
- Register scratch,
+ WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+ Register the_heap_number, Register scratch,
Register scratch2)
- : PlatformCodeStub(isolate),
- the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch),
- sign_(scratch2) {
- DCHECK(IntRegisterBits::is_valid(the_int_.code()));
- DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
- DCHECK(ScratchRegisterBits::is_valid(scratch_.code()));
- DCHECK(SignRegisterBits::is_valid(sign_.code()));
+ : PlatformCodeStub(isolate) {
+ minor_key_ = IntRegisterBits::encode(the_int.code()) |
+ HeapNumberRegisterBits::encode(the_heap_number.code()) |
+ ScratchRegisterBits::encode(scratch.code()) |
+ SignRegisterBits::encode(scratch2.code());
+ DCHECK(IntRegisterBits::is_valid(the_int.code()));
+ DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
+ DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
+ DCHECK(SignRegisterBits::is_valid(scratch2.code()));
}
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
- Register sign_;
+ Register the_int() const {
+ return Register::from_code(IntRegisterBits::decode(minor_key_));
+ }
+
+ Register the_heap_number() const {
+ return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+ }
+
+ Register scratch() const {
+ return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+ }
+
+ Register sign() const {
+ return Register::from_code(SignRegisterBits::decode(minor_key_));
+ }
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
@@ -175,16 +117,8 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class ScratchRegisterBits: public BitField<int, 8, 4> {};
class SignRegisterBits: public BitField<int, 12, 4> {};
- Major MajorKey() const { return WriteInt32ToHeapNumber; }
- int MinorKey() const {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code())
- | SignRegisterBits::encode(sign_.code());
- }
-
- void Generate(MacroAssembler* masm);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};
@@ -197,16 +131,19 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -274,6 +211,8 @@ class RecordWriteStub: public PlatformCodeStub {
4 * Assembler::kInstrSize);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
@@ -338,7 +277,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- void Generate(MacroAssembler* masm);
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -346,18 +287,28 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 5> {};
@@ -366,13 +317,10 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
@@ -384,14 +332,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() const { return DirectCEntry; }
- int MinorKey() const { return 0; }
-
bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
@@ -400,9 +347,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -434,13 +381,12 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
-
- int MinorKey() const { return LookupModeBits::encode(mode_); }
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index c413665771..0ecac19b49 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -73,7 +73,8 @@ UnaryMathFunction CreateExpFunction() {
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
-#if defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
+ defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
size_t actual_size;
@@ -1058,18 +1059,18 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(call_runtime, ne, at, Operand(zero_reg));
__ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
- Label ascii, done;
+ Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ And(at, result, Operand(kStringEncodingMask));
- __ Branch(&ascii, ne, at, Operand(zero_reg));
+ __ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
__ sll(at, index, 1);
__ Addu(at, string, at);
__ lhu(result, MemOperand(at));
__ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
+ __ bind(&one_byte);
+ // One_byte string.
__ Addu(at, string, index);
__ lbu(result, MemOperand(at));
__ bind(&done);
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 82a410ec23..b02ec4ff10 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -8,7 +8,7 @@
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index f14992719d..30e4b2e003 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -278,6 +278,8 @@ Instruction::Type Instruction::InstructionType() const {
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
case BC1: // Branch on coprocessor condition.
+ case BC1EQZ:
+ case BC1NEZ:
return kImmediateType;
default:
return kRegisterType;
@@ -292,6 +294,7 @@ Instruction::Type Instruction::InstructionType() const {
case BLEZ:
case BGTZ:
case ADDI:
+ case DADDI:
case ADDIU:
case SLTI:
case SLTIU:
@@ -303,6 +306,8 @@ Instruction::Type Instruction::InstructionType() const {
case BNEL:
case BLEZL:
case BGTZL:
+ case BEQZC:
+ case BNEZC:
case LB:
case LH:
case LWL:
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index b2cbea734e..5ead1105ee 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -4,7 +4,7 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
-
+#include "src/globals.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
@@ -17,17 +17,25 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants {
- kMips32r2,
- kMips32r1,
+ kMips32r1 = v8::internal::MIPSr1,
+ kMips32r2 = v8::internal::MIPSr2,
+ kMips32r6 = v8::internal::MIPSr6,
kLoongson
};
#ifdef _MIPS_ARCH_MIPS32R2
static const ArchVariants kArchVariant = kMips32r2;
+#elif _MIPS_ARCH_MIPS32R6
+ static const ArchVariants kArchVariant = kMips32r6;
#elif _MIPS_ARCH_LOONGSON
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predates (and is a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
+#elif _MIPS_ARCH_MIPS32RX
+// This flags referred to compatibility mode that creates universal code that
+// can run on any MIPS32 architecture revision. The dynamically generated code
+// by v8 is specialized for the MIPS host detected in runtime probing.
+ static const ArchVariants kArchVariant = kMips32r1;
#else
static const ArchVariants kArchVariant = kMips32r1;
#endif
@@ -45,6 +53,22 @@ enum Endianness {
#error Unknown endianness
#endif
+enum FpuMode {
+ kFP32,
+ kFP64,
+ kFPXX
+};
+
+#if defined(FPU_MODE_FP32)
+ static const FpuMode kFpuMode = kFP32;
+#elif defined(FPU_MODE_FP64)
+ static const FpuMode kFpuMode = kFP64;
+#elif defined(FPU_MODE_FPXX)
+ static const FpuMode kFpuMode = kFPXX;
+#else
+ static const FpuMode kFpuMode = kFP32;
+#endif
+
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
@@ -68,6 +92,26 @@ const uint32_t kHoleNanLower32Offset = 4;
#error Unknown endianness
#endif
+#ifndef FPU_MODE_FPXX
+#define IsFp64Mode() \
+ (kFpuMode == kFP64)
+#else
+#define IsFp64Mode() \
+ (CpuFeatures::IsSupported(FP64FPU))
+#endif
+
+#ifndef _MIPS_ARCH_MIPS32RX
+#define IsMipsArchVariant(check) \
+ (kArchVariant == check)
+#else
+#define IsMipsArchVariant(check) \
+ (CpuFeatures::IsSupported(check))
+#endif
+
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
@@ -99,6 +143,8 @@ const int kInvalidFPURegister = -1;
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -216,10 +262,14 @@ const int kLuiShift = 16;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
+const int kImm21Shift = 0;
+const int kImm21Bits = 21;
const int kImm26Shift = 0;
const int kImm26Bits = 26;
const int kImm28Shift = 0;
const int kImm28Bits = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits = 32;
// In branches and jumps immediate fields point to words, not bytes,
// and are therefore shifted by 2.
@@ -278,14 +328,16 @@ enum Opcode {
ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+ BEQC = ((2 << 3) + 0) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
+ DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
@@ -304,11 +356,13 @@ enum Opcode {
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ BEQZC = ((6 << 3) + 6) << kOpcodeShift,
PREF = ((6 << 3) + 3) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+ BNEZC = ((7 << 3) + 6) << kOpcodeShift,
COP1X = ((1 << 4) + 3) << kOpcodeShift
};
@@ -330,6 +384,8 @@ enum SecondaryField {
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
+ CLZ_R6 = ((2 << 3) + 0),
+ CLO_R6 = ((2 << 3) + 1),
MFLO = ((2 << 3) + 2),
MULT = ((3 << 3) + 0),
@@ -354,7 +410,21 @@ enum SecondaryField {
TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4),
+ SELEQZ_S = ((6 << 3) + 5),
TNE = ((6 << 3) + 6),
+ SELNEZ_S = ((6 << 3) + 7),
+
+ // Multiply integers in r6.
+ MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
+
+ MUL_OP = ((0 << 3) + 2),
+ MUH_OP = ((0 << 3) + 3),
+ DIV_OP = ((0 << 3) + 2),
+ MOD_OP = ((0 << 3) + 3),
+
+ DIV_MOD = ((3 << 3) + 2),
+ DIV_MOD_U = ((3 << 3) + 3),
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
@@ -370,6 +440,7 @@ enum SecondaryField {
BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16,
+ BGEZALL = ((2 << 3) + 3) << 16,
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
@@ -414,6 +485,10 @@ enum SecondaryField {
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
@@ -430,6 +505,46 @@ enum SecondaryField {
CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
+ BC1EQZ = ((2 << 2) + 1) << 21,
+ BC1NEZ = ((3 << 2) + 1) << 21,
+ // COP1 CMP positive predicates Bit 5..4 = 00.
+ CMP_AF = ((0 << 3) + 0),
+ CMP_UN = ((0 << 3) + 1),
+ CMP_EQ = ((0 << 3) + 2),
+ CMP_UEQ = ((0 << 3) + 3),
+ CMP_LT = ((0 << 3) + 4),
+ CMP_ULT = ((0 << 3) + 5),
+ CMP_LE = ((0 << 3) + 6),
+ CMP_ULE = ((0 << 3) + 7),
+ CMP_SAF = ((1 << 3) + 0),
+ CMP_SUN = ((1 << 3) + 1),
+ CMP_SEQ = ((1 << 3) + 2),
+ CMP_SUEQ = ((1 << 3) + 3),
+ CMP_SSLT = ((1 << 3) + 4),
+ CMP_SSULT = ((1 << 3) + 5),
+ CMP_SLE = ((1 << 3) + 6),
+ CMP_SULE = ((1 << 3) + 7),
+ // COP1 CMP negative predicates Bit 5..4 = 01.
+ CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2 << 3) + 1),
+ CMP_UNE = ((2 << 3) + 2),
+ CMP_NE = ((2 << 3) + 3),
+ CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3 << 3) + 1),
+ CMP_SUNE = ((3 << 3) + 2),
+ CMP_SNE = ((3 << 3) + 3),
+ CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2 << 3) + 0),
+ SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
@@ -775,6 +890,11 @@ class Instruction {
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
+ inline int32_t Imm21Value() const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+ }
+
inline int32_t Imm26Value() const {
DCHECK(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index c421c727d1..96a146715f 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -185,17 +185,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-mips.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
@@ -209,9 +209,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-mips.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 1e88e62b21..b40d7f45ff 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -97,14 +97,13 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(s0.code(), params);
- output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
- output_frame->SetRegister(s2.code(), handler);
+ output_frame->SetRegister(a0.code(), params);
+ output_frame->SetRegister(a1.code(), handler);
}
@@ -324,22 +323,59 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label table_start, done;
+ Label table_start, done, done_special, trampoline_jump;
__ bind(&table_start);
- for (int i = 0; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
-
- DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
+ int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
+ (table_entry_size_ / Assembler::kInstrSize);
+
+ if (count() <= kMaxEntriesBranchReach) {
+ // Common case.
+ for (int i = 0; i < count(); i++) {
+ Label start;
+ __ bind(&start);
+ DCHECK(is_int16(i));
+ __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+
+ DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+ }
+
+ DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+ count() * table_entry_size_);
+ __ bind(&done);
+ __ Push(at);
+ } else {
+ // Uncommon case, the branch cannot reach.
+ // Create mini trampoline and adjust id constants to get proper value at
+ // the end of table.
+ for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+ Label start;
+ __ bind(&start);
+ DCHECK(is_int16(i));
+ __ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ li(at, - i); // In the delay slot.
+ DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+ }
+ // Entry with id == kMaxEntriesBranchReach - 1.
+ __ bind(&trampoline_jump);
+ __ Branch(USE_DELAY_SLOT, &done_special);
+ __ li(at, -1);
+
+ for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
+ Label start;
+ __ bind(&start);
+ DCHECK(is_int16(i));
+ __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ }
- DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
- __ bind(&done);
- __ Push(at);
+ DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+ count() * table_entry_size_);
+ __ bind(&done_special);
+ __ addiu(at, at, kMaxEntriesBranchReach);
+ __ bind(&done);
+ __ Push(at);
+ }
}
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 4a8fe65777..564627e460 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -86,6 +86,7 @@ class Decoder {
void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr);
+ void PrintXImm21(Instruction* instr);
void PrintXImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions.
// Printing of instruction name.
@@ -246,6 +247,13 @@ void Decoder::PrintXImm16(Instruction* instr) {
}
+// Print 21-bit immediate value.
+void Decoder::PrintXImm21(Instruction* instr) {
+ uint32_t imm = instr->Imm21Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
// Print 26-bit immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift;
@@ -360,7 +368,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintXImm16(instr);
}
return 6;
- } else {
+ } else if (format[3] == '2' && format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "imm21x"));
+ PrintXImm21(instr);
+ return 6;
+ } else if (format[3] == '2' && format[4] == '6') {
DCHECK(STRING_STARTS_WITH(format, "imm26x"));
PrintXImm26(instr);
return 6;
@@ -492,25 +504,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case CVT_W_D:
Format(instr, "cvt.w.d 'fd, 'fs");
break;
- case CVT_L_D: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case CVT_L_D:
+ Format(instr, "cvt.l.d 'fd, 'fs");
break;
- }
case TRUNC_W_D:
Format(instr, "trunc.w.d 'fd, 'fs");
break;
- case TRUNC_L_D: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "trunc.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case TRUNC_L_D:
+ Format(instr, "trunc.l.d 'fd, 'fs");
break;
- }
case ROUND_W_D:
Format(instr, "round.w.d 'fd, 'fs");
break;
@@ -569,22 +571,42 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
case L:
switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.d.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case CVT_D_L:
+ Format(instr, "cvt.d.l 'fd, 'fs");
break;
- }
- case CVT_S_L: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.s.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case CVT_S_L:
+ Format(instr, "cvt.s.l 'fd, 'fs");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.d 'fd, 'fs, 'ft");
break;
- }
default:
UNREACHABLE();
}
@@ -623,7 +645,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
if (instr->RsValue() == 0) {
Format(instr, "srl 'rd, 'rt, 'sa");
} else {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "rotr 'rd, 'rt, 'sa");
} else {
Unknown(instr);
@@ -640,7 +662,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
if (instr->SaValue() == 0) {
Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "rotrv 'rd, 'rt, 'rs");
} else {
Unknown(instr);
@@ -651,22 +673,64 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "srav 'rd, 'rt, 'rs");
break;
case MFHI:
- Format(instr, "mfhi 'rd");
+ if (instr->Bits(25, 16) == 0) {
+ Format(instr, "mfhi 'rd");
+ } else {
+ if ((instr->FunctionFieldRaw() == CLZ_R6)
+ && (instr->FdValue() == 1)) {
+ Format(instr, "clz 'rd, 'rs");
+ } else if ((instr->FunctionFieldRaw() == CLO_R6)
+ && (instr->FdValue() == 1)) {
+ Format(instr, "clo 'rd, 'rs");
+ }
+ }
break;
case MFLO:
Format(instr, "mflo 'rd");
break;
- case MULT:
- Format(instr, "mult 'rs, 'rt");
+ case MULT: // @Mips32r6 == MUL_MUH.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "mult 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "muh 'rd, 'rs, 'rt");
+ }
+ }
break;
- case MULTU:
- Format(instr, "multu 'rs, 'rt");
+ case MULTU: // @Mips32r6 == MUL_MUH_U.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "multu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "mulu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "muhu 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DIV:
- Format(instr, "div 'rs, 'rt");
+ case DIV: // @Mips32r6 == DIV_MOD.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "div 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "div 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "mod 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DIVU:
- Format(instr, "divu 'rs, 'rt");
+ case DIVU: // @Mips32r6 == DIV_MOD_U.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "divu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "divu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "modu 'rd, 'rs, 'rt");
+ }
+ }
break;
case ADD:
Format(instr, "add 'rd, 'rs, 'rt");
@@ -738,6 +802,12 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "movf 'rd, 'rs, 'bc");
}
break;
+ case SELEQZ_S:
+ Format(instr, "seleqz 'rd, 'rs, 'rt");
+ break;
+ case SELNEZ_S:
+ Format(instr, "selnez 'rd, 'rs, 'rt");
+ break;
default:
UNREACHABLE();
}
@@ -748,7 +818,9 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "mul 'rd, 'rs, 'rt");
break;
case CLZ:
- Format(instr, "clz 'rd, 'rs");
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "clz 'rd, 'rs");
+ }
break;
default:
UNREACHABLE();
@@ -757,7 +829,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
@@ -765,7 +837,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
}
case EXT: {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);
@@ -784,7 +856,6 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- // ------------- REGIMM class.
case COP1:
switch (instr->RsFieldRaw()) {
case BC1:
@@ -794,10 +865,150 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bc1f 'bc, 'imm16u");
}
break;
+ case BC1EQZ:
+ Format(instr, "bc1eqz 'ft, 'imm16u");
+ break;
+ case BC1NEZ:
+ Format(instr, "bc1nez 'ft, 'imm16u");
+ break;
+ case W: // CMP.S instruction.
+ switch (instr->FunctionValue()) {
+ case CMP_AF:
+ Format(instr, "cmp.af.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.S 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case L: // CMP.D instruction.
+ switch (instr->FunctionValue()) {
+ case CMP_AF:
+ Format(instr, "cmp.af.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.D 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case S:
+ switch (instr->FunctionValue()) {
+ case SEL:
+ Format(instr, "sel.S 'ft, 'fs, 'fd");
+ break;
+ case SELEQZ_C:
+ Format(instr, "seleqz.S 'ft, 'fs, 'fd");
+ break;
+ case SELNEZ_C:
+ Format(instr, "selnez.S 'ft, 'fs, 'fd");
+ break;
+ case MIN:
+ Format(instr, "min.S 'ft, 'fs, 'fd");
+ break;
+ case MINA:
+ Format(instr, "mina.S 'ft, 'fs, 'fd");
+ break;
+ case MAX:
+ Format(instr, "max.S 'ft, 'fs, 'fd");
+ break;
+ case MAXA:
+ Format(instr, "maxa.S 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case D:
+ switch (instr->FunctionValue()) {
+ case SEL:
+ Format(instr, "sel.D 'ft, 'fs, 'fd");
+ break;
+ case SELEQZ_C:
+ Format(instr, "seleqz.D 'ft, 'fs, 'fd");
+ break;
+ case SELNEZ_C:
+ Format(instr, "selnez.D 'ft, 'fs, 'fd");
+ break;
+ case MIN:
+ Format(instr, "min.D 'ft, 'fs, 'fd");
+ break;
+ case MINA:
+ Format(instr, "mina.D 'ft, 'fs, 'fd");
+ break;
+ case MAX:
+ Format(instr, "max.D 'ft, 'fs, 'fd");
+ break;
+ case MAXA:
+ Format(instr, "maxa.D 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
default:
UNREACHABLE();
}
+
break; // Case COP1.
+ // ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
@@ -812,6 +1023,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case BGEZAL:
Format(instr, "bgezal 'rs, 'imm16u");
break;
+ case BGEZALL:
+ Format(instr, "bgezall 'rs, 'imm16u");
+ break;
default:
UNREACHABLE();
}
@@ -824,14 +1038,103 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
+ if ((instr->RtFieldRaw() == 0)
+ && (instr->RsFieldRaw() != 0)) {
+ Format(instr, "blez 'rs, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgezalc 'rs, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "blezalc 'rs, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
break;
case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
+ if ((instr->RtFieldRaw() == 0)
+ && (instr->RsFieldRaw() != 0)) {
+ Format(instr, "bgtz 'rs, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltuc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltzalc 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgtzalc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BLEZL:
+ if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgezc 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgec 'rs, 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "blezc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BGTZL:
+ if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltzc 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgtzc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BEQZC:
+ if (instr->RsFieldRaw() != 0) {
+ Format(instr, "beqzc 'rs, 'imm21x");
+ }
+ break;
+ case BNEZC:
+ if (instr->RsFieldRaw() != 0) {
+ Format(instr, "bnezc 'rs, 'imm21x");
+ }
break;
// ------------- Arithmetic instructions.
case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
+ } else {
+ // Check if BOVC or BEQC instruction.
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+ Format(instr, "bovc 'rs, 'rt, 'imm16s");
+ } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s");
+ } else {
+ UNREACHABLE();
+ }
+ }
+ break;
+ case DADDI:
+ if (IsMipsArchVariant(kMips32r6)) {
+ // Check if BNVC or BNEC instruction.
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+ Format(instr, "bnvc 'rs, 'rt, 'imm16s");
+ } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s");
+ } else {
+ UNREACHABLE();
+ }
+ }
break;
case ADDIU:
Format(instr, "addiu 'rt, 'rs, 'imm16s");
@@ -852,7 +1155,15 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
- Format(instr, "lui 'rt, 'imm16x");
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "lui 'rt, 'imm16x");
+ } else {
+ if (instr->RsValue() != 0) {
+ Format(instr, "aui 'rt, 'imm16x");
+ } else {
+ Format(instr, "lui 'rt, 'imm16x");
+ }
+ }
break;
// ------------- Memory instructions.
case LB:
@@ -907,6 +1218,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
+ printf("a 0x%x \n", instr->OpcodeFieldRaw());
UNREACHABLE();
break;
}
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 639f57fd63..8b20639c44 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -14,15 +14,16 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/mips/code-stubs-mips.h"
#include "src/mips/macro-assembler-mips.h"
@@ -1047,7 +1048,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1178,7 +1180,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
__ li(a1, FeedbackVector());
- __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@@ -1320,9 +1322,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1342,6 +1342,24 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ lw(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ li(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ Label done;
+ __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1387,10 +1405,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadIC::NameRegister(), Operand(proxy->var()->name()));
+ __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
@@ -1476,10 +1494,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadIC::NameRegister(), Operand(var->name()));
+ __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
@@ -1689,10 +1707,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(StoreIC::ValueRegister(), result_register());
- DCHECK(StoreIC::ValueRegister().is(a0));
- __ li(StoreIC::NameRegister(), Operand(key->value()));
- __ lw(StoreIC::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ DCHECK(StoreDescriptor::ValueRegister().is(a0));
+ __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1856,13 +1874,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1874,18 +1898,29 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = a1;
+ __ lw(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch, result_register());
+ }
+ break;
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in v0 and a1.
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
- __ lw(LoadIC::NameRegister(), MemOperand(sp, 0));
+ __ lw(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ lw(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1906,6 +1941,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1952,6 +1991,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1966,12 +2008,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
@@ -2002,7 +2044,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ sw(a1, FieldMemOperand(result_register(),
@@ -2014,7 +2056,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -2023,8 +2065,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
@@ -2082,10 +2124,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ lw(load_receiver, MemOperand(sp, kPointerSize));
__ lw(load_name, MemOperand(sp, 2 * kPointerSize));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
@@ -2102,7 +2144,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
@@ -2115,7 +2157,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
@@ -2280,9 +2322,11 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ li(LoadIC::NameRegister(), Operand(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -2291,11 +2335,23 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallIC(ic);
} else {
@@ -2327,8 +2383,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpICStub stub(isolate(), op, mode);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2369,12 +2425,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
case Token::MUL: {
__ SmiUntag(scratch1, right);
- __ Mult(left, scratch1);
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
+ __ Mul(scratch2, v0, left, scratch1);
+ __ sra(scratch1, v0, 31);
__ Branch(&stub_call, ne, scratch1, Operand(scratch2));
- __ mflo(v0);
__ Branch(&done, ne, v0, Operand(zero_reg));
__ Addu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
@@ -2405,9 +2458,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ mov(a0, result_register());
__ pop(a1);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -2437,9 +2490,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ push(result_register()); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ mov(StoreIC::ReceiverRegister(), result_register());
- __ pop(StoreIC::ValueRegister()); // Restore value.
- __ li(StoreIC::NameRegister(),
+ __ mov(StoreDescriptor::ReceiverRegister(), result_register());
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
@@ -2448,11 +2501,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ push(result_register()); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ mov(KeyedStoreIC::NameRegister(), result_register());
- __ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ mov(StoreDescriptor::NameRegister(), result_register());
+ __ Pop(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2477,9 +2530,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreIC::ValueRegister(), result_register());
- __ li(StoreIC::NameRegister(), Operand(var->name()));
- __ lw(StoreIC::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ lw(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2549,9 +2602,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(StoreIC::ValueRegister(), result_register());
- __ li(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2559,6 +2613,24 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // v0 : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(v0);
+ __ Push(key->value());
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
@@ -2569,13 +2641,11 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a0 is the value,
// - a1 is the key,
// - a2 is the receiver.
- __ mov(KeyedStoreIC::ValueRegister(), result_register());
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(a0));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(a0));
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2588,16 +2658,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), v0);
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), v0);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(v0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadIC::NameRegister(), v0);
- __ pop(LoadIC::ReceiverRegister());
+ __ Move(LoadDescriptor::NameRegister(), v0);
+ __ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(v0);
}
@@ -2615,12 +2692,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2631,7 +2707,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2644,6 +2721,42 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = a1;
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ mov(scratch, v0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(scratch, v0, v0, scratch);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2654,8 +2767,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
- __ Move(LoadIC::NameRegister(), v0);
+ __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2664,11 +2777,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(at);
__ sw(v0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2811,13 +2924,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+ // super.x() is handled in EmitCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
} else {
DCHECK(call_type == Call::OTHER_CALL);
@@ -3318,7 +3438,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
+ __ LoadRoot(v0, Heap::kFunction_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3439,9 +3559,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = a1;
Register value = a2;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value);
if (FLAG_debug_code) {
@@ -3476,9 +3596,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = a1;
Register value = a2;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value);
if (FLAG_debug_code) {
@@ -3838,7 +3958,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
@@ -3888,7 +4008,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
array = no_reg; // End of array's live range.
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ mov(string_length, zero_reg);
__ Addu(element,
@@ -3904,8 +4024,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
- array_length, Operand(zero_reg));
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
+ Operand(zero_reg));
}
__ bind(&loop);
__ lw(string, MemOperand(element));
@@ -3913,7 +4033,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ JumpIfSmi(string, &bailout);
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
@@ -3932,23 +4052,21 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (smi).
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
__ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi.
__ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
- __ Mult(array_length, scratch1);
+ __ Mul(scratch3, scratch2, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ mfhi(scratch2);
- __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
- __ mflo(scratch2);
+ __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ And(scratch3, scratch2, Operand(0x80000000));
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
@@ -3966,12 +4084,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+ elements_end, &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
@@ -4010,7 +4124,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
@@ -4021,7 +4135,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
+ // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
__ sb(separator, MemOperand(result_pos));
@@ -4101,15 +4215,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
__ lw(receiver, GlobalObjectOperand());
__ lw(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver);
// Load the function from the receiver.
- __ li(LoadIC::NameRegister(), Operand(expr->name()));
+ __ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -4276,6 +4390,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -4292,13 +4411,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
- __ lw(LoadIC::NameRegister(), MemOperand(sp, 0));
+ __ lw(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ lw(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
}
}
@@ -4381,8 +4501,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4409,10 +4530,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreIC::ValueRegister(), result_register());
- __ li(StoreIC::NameRegister(),
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreIC::ReceiverRegister());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4425,11 +4546,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ mov(KeyedStoreIC::ValueRegister(), result_register());
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4451,10 +4572,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadIC::NameRegister(), Operand(proxy->name()));
+ __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4614,7 +4735,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
new file mode 100644
index 0000000000..936ce20acc
--- /dev/null
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -0,0 +1,303 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return a1; }
+const Register LoadDescriptor::NameRegister() { return a2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return a0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return a3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return a1; }
+const Register StoreDescriptor::NameRegister() { return a2; }
+const Register StoreDescriptor::ValueRegister() { return a0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
+
+
+const Register InstanceofDescriptor::left() { return a0; }
+const Register InstanceofDescriptor::right() { return a1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return a1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return a2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return a2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a3, a2, a1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a3, a2, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a3, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments
+ // a1 : the function to call
+ // a2 : feedback vector
+ // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, a0, a1, a2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // a0 -- number of arguments
+ // a1 -- function
+ // a2 -- allocation site with elements kind
+ Register registers[] = {cp, a1, a2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, a1, a2, a0};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // a0 -- number of arguments
+ // a1 -- constructor function
+ Register registers[] = {cp, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, a1, a0};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a1, // JSFunction
+ a0, // actual number of arguments
+ a2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a0, // callee
+ t0, // call_data
+ a2, // holder
+ a1, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 8cf77607ad..ef72560b01 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -27,17 +27,21 @@
#include "src/v8.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"
-#include "src/stub-cache.h"
+
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -47,9 +51,9 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -72,11 +76,8 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
@@ -322,32 +323,30 @@ bool LCodeGen::GenerateDeferredCode() {
}
-bool LCodeGen::GenerateDeoptJumpTable() {
- if (deopt_jump_table_.length() > 0) {
+bool LCodeGen::GenerateJumpTable() {
+ if (jump_table_.length() > 0) {
Label needs_frame, call_deopt_entry;
Comment(";;; -------------------- Jump table --------------------");
- Address base = deopt_jump_table_[0].address;
+ Address base = jump_table_[0].address;
Register entry_offset = t9;
- int length = deopt_jump_table_.length();
+ int length = jump_table_.length();
for (int i = 0; i < length; i++) {
- __ bind(&deopt_jump_table_[i].label);
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
- Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
- DCHECK(type == deopt_jump_table_[0].bailout_type);
- Address entry = deopt_jump_table_[i].address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- DCHECK(id != Deoptimizer::kNotDeoptimizationEntry);
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
// offset which will be added to the base address later.
__ li(entry_offset, Operand(entry - base));
- if (deopt_jump_table_[i].needs_frame) {
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ Branch(&needs_frame);
@@ -815,11 +814,11 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type,
- Register src1,
+ const char* detail, Register src1,
const Operand& src2) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
@@ -859,37 +858,36 @@ void LCodeGen::DeoptimizeIf(Condition condition,
__ bind(&skip);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+ __ Branch(&jump_table_.last().label, condition, src1, src2);
}
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Register src1,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, Register src1,
const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+ DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
}
@@ -897,7 +895,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -1119,7 +1117,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ subu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ subu(dividend, zero_reg, dividend);
@@ -1138,7 +1136,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
@@ -1151,7 +1149,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
@@ -1164,13 +1162,13 @@ void LCodeGen::DoModI(LModI* instr) {
const Register result_reg = ToRegister(instr->result());
// div runs in the background while we check for special cases.
- __ div(left_reg, right_reg);
+ __ Mod(result_reg, left_reg, right_reg);
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
@@ -1179,7 +1177,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@@ -1189,10 +1187,9 @@ void LCodeGen::DoModI(LModI* instr) {
}
// If we care about -0, test if the dividend is <0 and the result is 0.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+ __ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
}
__ bind(&done);
}
@@ -1202,24 +1199,24 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1249,14 +1246,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1265,7 +1262,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Mul(scratch0(), result, Operand(divisor));
__ Subu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
}
}
@@ -1276,21 +1273,22 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
+ Register remainder = ToRegister(instr->temp());
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ div(dividend, divisor);
+ __ Div(remainder, result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1299,16 +1297,12 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- __ mfhi(result);
- DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
- __ mflo(result);
- } else {
- __ mflo(result);
+ DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
}
}
@@ -1354,14 +1348,14 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Subu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
}
// Dividing by -1 is basically negation, unless we overflow.
__ Xor(scratch, scratch, result);
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ge, instr, "overflow", scratch, Operand(zero_reg));
}
return;
}
@@ -1389,14 +1383,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1433,21 +1427,21 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
-
+ Register remainder = scratch0();
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ div(dividend, divisor);
+ __ Div(remainder, result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1456,15 +1450,12 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
// We performed a truncating division. Correct the result if necessary.
Label done;
- Register remainder = scratch0();
- __ mfhi(remainder);
- __ mflo(result);
__ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
__ Xor(remainder, remainder, Operand(divisor));
__ Branch(&done, ge, remainder, Operand(zero_reg));
@@ -1490,14 +1481,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
} else {
__ Subu(result, zero_reg, left);
}
@@ -1506,7 +1497,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
@@ -1521,18 +1512,18 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs)) {
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ sll(result, left, shift);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Subu(result, zero_reg, result);
- } else if (IsPowerOf2(constant_abs - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ sll(scratch, left, shift);
__ Addu(result, scratch, left);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Subu(result, zero_reg, result);
- } else if (IsPowerOf2(constant_abs + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ sll(scratch, left, shift);
__ Subu(result, scratch, left);
@@ -1553,16 +1544,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
// hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
- __ mult(result, right);
- __ mfhi(scratch);
- __ mflo(result);
+ __ Mul(scratch, result, result, right);
} else {
- __ mult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
+ __ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1577,10 +1564,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq,
- instr->environment(),
- result,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
__ bind(&done);
}
}
@@ -1644,7 +1628,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
}
break;
case Token::SHL:
@@ -1679,7 +1663,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
}
__ Move(result, left);
}
@@ -1694,7 +1678,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
} else {
__ sll(result, left, shift_count);
}
@@ -1742,7 +1726,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
}
}
@@ -1796,9 +1780,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1933,7 +1917,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
}
}
@@ -2046,8 +2030,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
@@ -2193,7 +2178,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
const Register map = scratch0();
@@ -2249,7 +2234,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, "unexpected object", zero_reg,
+ Operand(zero_reg));
}
}
}
@@ -2527,7 +2513,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2609,7 +2595,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2638,7 +2624,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ GetObjectType(temp, temp2, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
} else {
__ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -2704,15 +2690,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
@@ -2828,7 +2814,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2893,28 +2879,36 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ li(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
+ __ li(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(LoadIC::NameRegister(), Operand(instr->name()));
+ __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ li(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(a0));
- __ li(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2935,7 +2929,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
}
// Store the value.
@@ -2954,7 +2948,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2978,7 +2972,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@@ -3033,21 +3027,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(v0));
// Name is always in a2.
- __ li(LoadIC::NameRegister(), Operand(instr->name()));
+ __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ li(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(a0));
- __ li(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3063,7 +3051,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@@ -3199,8 +3187,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
+ Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
@@ -3253,7 +3241,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
}
}
@@ -3289,10 +3277,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
}
}
}
@@ -3348,20 +3336,14 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ li(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(a0));
- __ li(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3444,11 +3426,11 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr->environment(),
- scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
+ Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
@@ -3483,7 +3465,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+ DeoptimizeIf(hi, instr, "too many arguments", length,
+ Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3613,7 +3596,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
Label done;
Register exponent = scratch0();
@@ -3680,21 +3663,21 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
__ bind(&done);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3734,15 +3717,16 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
__ Branch(&done, ne, result, Operand(zero_reg));
- __ mfc1(scratch1, input.high());
+ __ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
@@ -3756,7 +3740,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
Label done, check_sign_on_zero;
// Extract exponent bits.
- __ mfc1(result, input.high());
+ __ Mfhc1(result, input);
__ Ext(scratch,
result,
HeapNumber::kExponentShift,
@@ -3775,7 +3759,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr->environment(), scratch,
+ DeoptimizeIf(ge, instr, "overflow", scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@@ -3786,12 +3770,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check sign of the result: if the sign changed, the input
// value was in ]0.5, 0[ and the result should be -0.
- __ mfc1(result, double_scratch0().high());
+ __ Mfhc1(result, double_scratch0());
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr->environment(), result,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@@ -3810,15 +3793,16 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
__ bind(&check_sign_on_zero);
- __ mfc1(scratch, input.high());
+ __ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
}
__ bind(&done);
}
@@ -3867,10 +3851,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(f4));
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(a2));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(f2));
DCHECK(ToDoubleRegister(instr->result()).is(f0));
@@ -3879,10 +3864,11 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(a2, &no_deopt);
- __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!t3.is(tagged_exponent));
+ __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3948,6 +3934,34 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(a1));
+ DCHECK(name.is(a2));
+
+ Register scratch = a3;
+ Register extra = t0;
+ Register extra2 = t1;
+ Register extra3 = t2;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
@@ -4170,10 +4184,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ li(StoreIC::NameRegister(), Operand(instr->name()));
+ __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4197,7 +4211,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment(), reg, operand);
+ DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
}
}
@@ -4401,13 +4415,12 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = (instr->strict_mode() == STRICT)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4456,7 +4469,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
ne, &no_memento_found);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&no_memento_found);
}
@@ -4473,14 +4486,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4528,14 +4541,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4606,18 +4619,18 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4634,18 +4647,18 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4728,14 +4741,14 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4791,12 +4804,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@@ -4812,19 +4825,20 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4836,22 +4850,24 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
- __ mfc1(scratch, result_reg.high());
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ __ Mfhc1(scratch, result_reg);
+ DeoptimizeIf(eq, instr, "minus zero", scratch,
+ Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
+ Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
@@ -4915,12 +4931,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
+ Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
// Load the double value.
__ ldc1(double_scratch,
@@ -4935,15 +4951,15 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
- // Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
- __ mfc1(scratch1, double_scratch.high());
+ __ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
}
}
__ bind(&done);
@@ -4951,14 +4967,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4997,11 +5013,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
@@ -5024,14 +5036,15 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ mfc1(scratch1, double_input.high());
+ __ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
@@ -5057,26 +5070,27 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ mfc1(scratch1, double_input.high());
+ __ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
__ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
- DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", scratch1, Operand(zero_reg));
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
}
@@ -5084,7 +5098,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
}
@@ -5102,12 +5116,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
} else {
- DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
}
}
} else {
@@ -5115,14 +5129,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
+ Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
}
}
}
@@ -5137,11 +5151,9 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(at));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(object));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
}
}
@@ -5157,22 +5169,22 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5210,7 +5222,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
}
__ bind(&success);
@@ -5248,7 +5260,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr->environment(), input_reg,
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@@ -5287,14 +5299,14 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5464,9 +5476,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -5673,8 +5684,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
+ Operand(zero_reg));
}
@@ -5701,14 +5712,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5765,18 +5776,19 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+ DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
Register null_value = t1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+ DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+ DeoptimizeIf(le, instr, "not a JavaScript object", a1,
+ Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
@@ -5793,7 +5805,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+ DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
__ bind(&use_cache);
}
@@ -5813,7 +5825,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
__ bind(&done);
}
@@ -5823,7 +5835,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
}
@@ -5842,7 +5854,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5855,10 +5867,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 5c19e0d3ac..43316e471b 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -25,7 +25,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -169,10 +169,10 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -228,14 +228,12 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Deoptimizer::BailoutType bailout_type,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::BailoutType bailout_type, const char* detail,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Register src1 = zero_reg,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail = NULL, Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
@@ -270,7 +268,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -297,12 +295,8 @@ class LCodeGen: public LCodeGenBase {
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -356,7 +350,7 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -364,8 +358,11 @@ class LCodeGen: public LCodeGenBase {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
@@ -383,7 +380,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.h b/deps/v8/src/mips/lithium-gap-resolver-mips.h
index 0072e526cb..9e6f14e5aa 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.h
@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 5ff73db813..1757d929f7 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -430,12 +430,6 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -1087,14 +1081,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1104,6 +1098,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
@@ -1325,8 +1332,9 @@ LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = TempRegister();
LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
(instr->CheckFlag(HValue::kCanOverflow) &&
@@ -1511,7 +1519,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
HAdd* add = HAdd::cast(instr->uses().value());
if (instr == add->left()) {
@@ -1584,7 +1592,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* result = DefineAsRegister(add);
return result;
} else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (instr->left()->IsMul())
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
@@ -1626,9 +1634,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), f4) :
- UseFixed(instr->right(), a2);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), f4)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, f0),
instr,
@@ -2050,11 +2059,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2109,10 +2118,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2174,11 +2184,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2234,9 +2245,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2293,7 +2305,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier || instr->field_representation().IsSmi()) {
+ if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
@@ -2310,8 +2322,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
@@ -2387,10 +2400,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2406,7 +2419,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
@@ -2503,6 +2516,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 9578955600..36e5b57c74 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -152,6 +152,7 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -163,11 +164,11 @@ class LCodeGen;
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -287,7 +288,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -309,11 +310,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
@@ -328,8 +329,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const FINAL OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -365,11 +366,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -377,14 +378,14 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -393,7 +394,7 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -409,14 +410,14 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -425,25 +426,25 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -459,16 +460,16 @@ class LLabel V8_FINAL : public LGap {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -481,9 +482,30 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -495,7 +517,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -534,7 +556,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -549,7 +571,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -570,7 +592,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -584,11 +606,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -600,14 +622,14 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -625,7 +647,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -643,7 +665,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 3> {
public:
LModI(LOperand* left,
LOperand* right) {
@@ -659,7 +681,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -677,7 +699,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -695,22 +717,24 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* dividend, LOperand* divisor) {
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
inputs_[1] = divisor;
+ temps_[0] = temp;
}
LOperand* dividend() { return inputs_[0]; }
LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -729,7 +753,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -749,7 +773,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor) {
inputs_[0] = dividend;
@@ -764,7 +788,7 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -780,7 +804,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -797,13 +821,13 @@ class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -822,11 +846,11 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathFloor(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -841,7 +865,7 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -856,7 +880,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
@@ -866,7 +890,7 @@ class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -881,7 +905,7 @@ class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -893,7 +917,7 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
@@ -905,7 +929,7 @@ class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -927,7 +951,7 @@ class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -939,7 +963,7 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -953,7 +977,7 @@ class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -968,7 +992,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -981,7 +1005,7 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -997,7 +1021,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1014,7 +1038,7 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1027,11 +1051,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1042,11 +1066,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1060,11 +1084,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1082,11 +1106,11 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1098,11 +1122,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1115,7 +1139,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1128,11 +1152,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1146,11 +1170,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1169,7 +1193,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1185,7 +1209,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1206,7 +1230,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1215,7 +1239,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1230,7 +1254,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1247,7 +1271,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1268,7 +1292,7 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1283,7 +1307,7 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1292,7 +1316,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1301,7 +1325,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1310,7 +1334,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1321,7 +1345,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1332,7 +1356,7 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1343,11 +1367,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1364,7 +1388,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1376,7 +1400,7 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1395,7 +1419,7 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -1410,7 +1434,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -1431,7 +1455,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1446,7 +1470,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1461,7 +1485,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1476,7 +1500,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1488,18 +1512,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1516,16 +1540,16 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual Opcode opcode() const FINAL { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1548,7 +1572,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1561,7 +1585,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1580,7 +1604,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1593,7 +1617,7 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1602,7 +1626,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1632,7 +1656,7 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* vector) {
@@ -1652,14 +1676,14 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1680,7 +1704,7 @@ class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1695,7 +1719,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1712,7 +1736,7 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1727,11 +1751,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1743,7 +1767,7 @@ class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1756,7 +1780,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
@@ -1773,7 +1797,7 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1783,27 +1807,27 @@ class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1816,7 +1840,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -1827,48 +1851,47 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
- const InterfaceDescriptor* descriptor() { return descriptor_; }
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- const InterfaceDescriptor* descriptor_;
+ CallInterfaceDescriptor descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1881,13 +1904,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1904,7 +1927,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1917,13 +1940,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1936,13 +1959,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -1953,7 +1976,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -1963,7 +1986,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1975,7 +1998,7 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1987,7 +2010,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2003,7 +2026,7 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2019,7 +2042,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2036,7 +2059,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2052,7 +2075,7 @@ class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -2068,7 +2091,7 @@ class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
@@ -2089,7 +2112,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2102,7 +2125,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2115,7 +2138,7 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2132,7 +2155,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2147,7 +2170,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2155,7 +2178,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2170,14 +2193,14 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2202,13 +2225,13 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
@@ -2228,13 +2251,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2252,7 +2275,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2263,7 +2286,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2279,7 +2302,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2297,7 +2320,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2314,7 +2337,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2329,7 +2352,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -2342,7 +2365,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2355,7 +2378,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
@@ -2368,7 +2391,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2380,7 +2403,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2393,7 +2416,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2407,7 +2430,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2419,7 +2442,7 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2433,7 +2456,7 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -2446,7 +2469,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -2460,7 +2483,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
LOperand* size,
@@ -2482,7 +2505,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2495,7 +2518,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2508,7 +2531,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2521,7 +2544,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2535,7 +2558,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2548,11 +2571,11 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2565,18 +2588,18 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2594,7 +2617,7 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2608,7 +2631,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2624,7 +2647,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2638,7 +2661,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2682,7 +2705,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2692,20 +2715,14 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator) { }
-
- Isolate* isolate() const { return graph_->isolate(); }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2739,24 +2756,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2798,7 +2797,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2842,10 +2841,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 6915922729..6f16f1d4c0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -8,12 +8,14 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -207,8 +209,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+ li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
}
}
@@ -282,8 +284,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+ li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -357,8 +359,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -395,8 +397,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Ret(eq, t8, Operand(zero_reg));
}
push(ra);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
@@ -641,7 +642,7 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, rt.rm());
mflo(rd);
} else {
@@ -651,7 +652,7 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
DCHECK(!rs.is(at));
li(at, rt);
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, at);
mflo(rd);
} else {
@@ -661,6 +662,71 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
+void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ muh(rd_hi, rs, rt.rm());
+ mul(rd_lo, rs, rt.rm());
+ } else {
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ mul(rd_lo, rs, rt.rm());
+ muh(rd_hi, rs, rt.rm());
+ }
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ muh(rd_hi, rs, at);
+ mul(rd_lo, rs, at);
+ } else {
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ mul(rd_lo, rs, at);
+ muh(rd_hi, rs, at);
+ }
+ }
+ }
+}
+
+
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ muh(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mfhi(rd);
+ } else {
+ muh(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
@@ -697,6 +763,99 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
}
+void MacroAssembler::Div(Register rem, Register res,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, rt.rm());
+ mod(rem, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, at);
+ mod(rem, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ } else {
+ div(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ } else {
+ div(res, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ mod(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mfhi(rd);
+ } else {
+ mod(rd, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ modu(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, at);
+ mfhi(rd);
+ } else {
+ modu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
@@ -709,6 +868,28 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
}
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, rt.rm());
+ mflo(res);
+ } else {
+ divu(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, at);
+ mflo(res);
+ } else {
+ divu(res, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
@@ -811,7 +992,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
@@ -837,7 +1018,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
lw(zero_reg, rs);
} else {
pref(hint, rs);
@@ -1033,7 +1214,7 @@ void MacroAssembler::Ext(Register rt,
DCHECK(pos < 32);
DCHECK(pos + size < 33);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
@@ -1057,7 +1238,7 @@ void MacroAssembler::Ins(Register rt,
DCHECK(pos + size <= 32);
DCHECK(size != 0);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ins_(rt, rs, pos, size);
} else {
DCHECK(!rt.is(t8) && !rs.is(t8));
@@ -1111,8 +1292,8 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd,
// Load 2^31 into f20 as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Add it to fd.
add_d(fd, fd, scratch);
@@ -1129,10 +1310,10 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
trunc_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
trunc_w_d(fd, fs);
}
@@ -1140,10 +1321,10 @@ void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
round_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
round_w_d(fd, fs);
}
@@ -1151,10 +1332,10 @@ void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
floor_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
floor_w_d(fd, fs);
}
@@ -1162,10 +1343,10 @@ void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
ceil_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
ceil_w_d(fd, fs);
}
@@ -1180,8 +1361,8 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
// Load 2^31 into scratch as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
@@ -1205,6 +1386,24 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mthc1(rt, fs);
+ } else {
+ mtc1(rt, fs.high());
+ }
+}
+
+
+void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mfhc1(rt, fs);
+ } else {
+ mfc1(rt, fs.high());
+ }
+}
+
+
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
@@ -1220,49 +1419,103 @@ void MacroAssembler::BranchF(Label* target,
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- }
-
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case lt:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- case nue:
- c(UEQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ } else {
+ // Use kDoubleCompareReg for comparison result. It has to be unavailable
+ // to lithium register allocator.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(nan, kDoubleCompareReg);
+ }
+ }
+
+ if (!IsMipsArchVariant(kMips32r6)) {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ switch (cc) {
+ case lt:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case gt:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case ge:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case le:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
+ } else {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ // Unsigned conditions are treated as their signed counterpart.
+ // Use kDoubleCompareReg for comparison result, it is
+ // valid in fp64 (FR = 1) mode which is implied for mips32r6.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ switch (cc) {
+ case lt:
+ cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case gt:
+ cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case ge:
+ cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case le:
+ cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case eq:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ueq:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ne:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case nue:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ default:
+ CHECK(0);
+ }
}
}
@@ -1297,16 +1550,16 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
// register of FPU register pair.
if (hi != 0) {
li(at, Operand(hi));
- mtc1(at, dst.high());
+ Mthc1(at, dst);
} else {
- mtc1(zero_reg, dst.high());
+ Mthc1(zero_reg, dst);
}
}
}
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
@@ -1318,7 +1571,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
@@ -1330,7 +1583,7 @@ void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
@@ -1356,7 +1609,7 @@ void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
@@ -1382,7 +1635,7 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
void MacroAssembler::Clz(Register rd, Register rs) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
Register mask = t8;
Register scratch = t9;
@@ -1717,7 +1970,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Unsigned comparison.
case Ugreater:
if (r2.is(zero_reg)) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
@@ -1725,7 +1978,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Ugreater_equal:
if (r2.is(zero_reg)) {
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
@@ -1742,7 +1995,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Uless_equal:
if (r2.is(zero_reg)) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
@@ -1824,7 +2077,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Unsigned comparison.
case Ugreater:
if (rt.imm32_ == 0) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
@@ -1834,7 +2087,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Ugreater_equal:
if (rt.imm32_ == 0) {
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
beq(scratch, zero_reg, offset);
@@ -1861,7 +2114,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Uless_equal:
if (rt.imm32_ == 0) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
@@ -1963,7 +2216,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
@@ -1973,7 +2226,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
@@ -1993,7 +2246,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Uless_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
@@ -2105,7 +2358,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
@@ -2244,7 +2497,7 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
li(r2, rt);
}
- {
+ if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
@@ -2308,7 +2561,88 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
default:
UNREACHABLE();
}
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
@@ -2339,7 +2673,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
li(r2, rt);
}
- {
+ if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
@@ -2414,7 +2748,100 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
default:
UNREACHABLE();
}
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
+
// Check that offset could actually hold on an int16_t.
DCHECK(is_int16(offset));
@@ -3136,12 +3563,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3149,7 +3574,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
@@ -3158,11 +3583,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3181,11 +3603,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -3193,11 +3614,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3217,24 +3635,21 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -4128,8 +4543,34 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addiu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(overflow_dst, dst, t9);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addiu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, dst, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -4170,8 +4611,34 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addiu(dst, left, -(right.immediate())); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, scratch, t9); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ addiu(dst, left, -(right.immediate()));
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, left, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -4602,7 +5069,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// The stack must be allign to 0 modulo 8 for stores with sdc1.
DCHECK(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
int space = FPURegister::kMaxNumRegisters * kDoubleSize;
@@ -4620,7 +5087,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
DCHECK(stack_space >= 0);
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
@@ -4715,7 +5182,7 @@ void MacroAssembler::AssertStackIsAligned() {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
andi(at, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -4979,71 +5446,59 @@ void MacroAssembler::LookupNumberStringCache(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
And(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatAsciiStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
- andi(scratch2, second, kFlatAsciiStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+ DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
+ andi(scratch2, second, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatAsciiStringMask));
- Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+ And(scratch, type, Operand(kFlatOneByteStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}
@@ -5116,7 +5571,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp.
mov(scratch, sp);
Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -5173,7 +5628,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
And(at, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -5446,8 +5901,8 @@ void MacroAssembler::EnsureNotWhite(
bind(&skip);
}
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@@ -5746,17 +6201,18 @@ void MacroAssembler::TruncatingDiv(Register result,
DCHECK(!dividend.is(result));
DCHECK(!dividend.is(at));
DCHECK(!result.is(at));
- MultiplierAndShift ms(divisor);
- li(at, Operand(ms.multiplier()));
- Mult(dividend, Operand(at));
- mfhi(result);
- if (divisor > 0 && ms.multiplier() < 0) {
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ li(at, Operand(mag.multiplier));
+ Mulh(result, dividend, Operand(at));
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
Addu(result, result, Operand(dividend));
}
- if (divisor < 0 && ms.multiplier() > 0) {
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
Subu(result, result, Operand(dividend));
}
- if (ms.shift() > 0) sra(result, result, ms.shift());
+ if (mag.shift > 0) sra(result, result, mag.shift);
srl(at, dividend, 31);
Addu(result, result, Operand(at));
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index c67d7fe149..62d3aa81d4 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -234,11 +234,11 @@ class MacroAssembler: public Assembler {
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ Mfhc1(dst_high, src);
}
inline void FmoveHigh(Register dst_high, FPURegister src) {
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ Mfhc1(dst_high, src);
}
inline void FmoveLow(Register dst_low, FPURegister src) {
@@ -247,7 +247,7 @@ class MacroAssembler: public Assembler {
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
- mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+ Mthc1(src_high, dst);
}
// Conditional move.
@@ -518,32 +518,25 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
@@ -582,14 +575,31 @@ class MacroAssembler: public Assembler {
instr(rs, Operand(j)); \
}
+#define DEFINE_INSTRUCTION3(instr) \
+ void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
+ void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
+ instr(rd_hi, rd_lo, rs, Operand(rt)); \
+ } \
+ void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
+ instr(rd_hi, rd_lo, rs, Operand(j)); \
+ }
+
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Div);
+ DEFINE_INSTRUCTION(Divu);
+ DEFINE_INSTRUCTION(Mod);
+ DEFINE_INSTRUCTION(Modu);
+ DEFINE_INSTRUCTION(Mulh);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
DEFINE_INSTRUCTION2(Div);
DEFINE_INSTRUCTION2(Divu);
+ DEFINE_INSTRUCTION3(Div);
+ DEFINE_INSTRUCTION3(Mul);
+
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
@@ -742,6 +752,20 @@ class MacroAssembler: public Assembler {
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
+
+ // FP32 mode: Move the general purpose register into
+ // the high part of the double-register pair.
+ // FP64 mode: Move the general-purpose register into
+ // the higher 32 bits of the 64-bit coprocessor register,
+ // while leaving the low bits unchanged.
+ void Mthc1(Register rt, FPURegister fs);
+
+ // FP32 mode: move the high part of the double-register pair into
+ // general purpose register.
+ // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
+ // general-purpose register.
+ void Mfhc1(Register rt, FPURegister fs);
+
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
@@ -1121,12 +1145,20 @@ class MacroAssembler: public Assembler {
Register overflow_dst,
Register scratch = at);
+ void AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch = at);
+
void SubuAndCheckForOverflow(Register dst,
Register left,
Register right,
Register overflow_dst,
Register scratch = at);
+ void SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch = at);
+
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
@@ -1151,13 +1183,10 @@ class MacroAssembler: public Assembler {
// Runtime calls.
// See comments at the beginning of CEntryStub::Generate.
- inline void PrepareCEntryArgs(int num_args) {
- li(s0, num_args);
- li(s1, (num_args - 1) * kPointerSize);
- }
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(s2, Operand(ref));
+ li(a1, Operand(ref));
}
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
@@ -1453,20 +1482,16 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential ASCII string and jump to label if
+ // Check if instance type is sequential one-byte string and jump to label if
// it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
Register index,
@@ -1474,21 +1499,20 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch,
uint32_t encoding_mask);
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Test that both first and second are sequential ASCII strings.
- // Check that they are non-smis.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_one_byte_strings);
void ClampUint8(Register output_reg, Register input_reg);
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 2bc66ecd25..dbc12a0797 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -241,7 +241,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_check;
@@ -365,7 +365,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ lbu(a3, MemOperand(a0, 0));
__ addiu(a0, a0, char_size());
__ lbu(t0, MemOperand(a2, 0));
@@ -446,7 +446,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ li(a0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ And(a1, current_character(), Operand(kTableSize - 1));
__ Addu(a0, a0, a1);
} else {
@@ -465,7 +465,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters.
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
@@ -482,12 +482,12 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// The emitted code for generic character classes is good enough.
return false;
case 'd':
- // Match ASCII digits ('0'..'9').
+ // Match Latin1 digits ('0'..'9').
__ Subu(a0, current_character(), Operand('0'));
BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
return true;
case 'D':
- // Match non ASCII-digits.
+ // Match non Latin1-digits.
__ Subu(a0, current_character(), Operand('0'));
BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
return true;
@@ -511,7 +511,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
__ Xor(a0, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
__ Subu(a0, a0, Operand(0x0b));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
} else {
Label done;
@@ -526,8 +526,8 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
}
ExternalReference map = ExternalReference::re_word_character_map();
@@ -539,8 +539,8 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ Branch(&done, hi, current_character(), Operand('z'));
}
ExternalReference map = ExternalReference::re_word_character_map();
@@ -548,7 +548,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
__ Addu(a0, a0, current_character());
__ lbu(a0, MemOperand(a0, 0));
BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -1046,7 +1046,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// Align the stack pointer and save the original sp value on the stack.
__ mov(scratch, sp);
__ Subu(sp, sp, Operand(kPointerSize));
- DCHECK(IsPowerOf2(stack_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
__ sw(scratch, MemOperand(sp));
@@ -1126,7 +1126,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1157,8 +1157,8 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1308,7 +1308,7 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
// must only be used to load a single character at a time.
DCHECK(characters == 1);
__ Addu(t5, end_of_input_address(), Operand(offset));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ lbu(current_character(), MemOperand(t5, 0));
} else {
DCHECK(mode_ == UC16);
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index ddf484cbbe..c7d8f6dcfb 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -197,7 +197,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (Latin1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 30924569bc..fabca67062 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -12,8 +12,8 @@
#if V8_TARGET_ARCH_MIPS
#include "src/assembler.h"
+#include "src/base/bits.h"
#include "src/disasm.h"
-#include "src/globals.h" // Need the BitCast.
#include "src/mips/constants-mips.h"
#include "src/mips/simulator-mips.h"
#include "src/ostreams.h"
@@ -67,11 +67,12 @@ class MipsDebugger {
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
- int32_t GetFPURegisterValueInt(int regnum);
- int64_t GetFPURegisterValueLong(int regnum);
+ int32_t GetFPURegisterValue32(int regnum);
+ int64_t GetFPURegisterValue64(int regnum);
float GetFPURegisterValueFloat(int regnum);
double GetFPURegisterValueDouble(int regnum);
bool GetValue(const char* desc, int32_t* value);
+ bool GetValue(const char* desc, int64_t* value);
// Set or delete a breakpoint. Returns true if successful.
bool SetBreakpoint(Instruction* breakpc);
@@ -160,20 +161,20 @@ int32_t MipsDebugger::GetRegisterValue(int regnum) {
}
-int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+int32_t MipsDebugger::GetFPURegisterValue32(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
} else {
- return sim_->get_fpu_register(regnum);
+ return sim_->get_fpu_register_word(regnum);
}
}
-int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+int64_t MipsDebugger::GetFPURegisterValue64(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
} else {
- return sim_->get_fpu_register_long(regnum);
+ return sim_->get_fpu_register(regnum);
}
}
@@ -204,7 +205,7 @@ bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
*value = GetRegisterValue(regnum);
return true;
} else if (fpuregnum != kInvalidFPURegister) {
- *value = GetFPURegisterValueInt(fpuregnum);
+ *value = GetFPURegisterValue32(fpuregnum);
return true;
} else if (strncmp(desc, "0x", 2) == 0) {
return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
@@ -215,6 +216,26 @@ bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
}
+bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue64(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return false;
+}
+
+
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
@@ -295,34 +316,76 @@ void MipsDebugger::PrintAllRegs() {
void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
- GetFPURegisterValueInt(n+1), \
- GetFPURegisterValueInt(n), \
- GetFPURegisterValueDouble(n)
+#define FPU_REG_INFO32(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+ GetFPURegisterValue32(n+1), \
+ GetFPURegisterValue32(n), \
+ GetFPURegisterValueDouble(n)
+
+#define FPU_REG_INFO64(n) FPURegisters::Name(n), \
+ GetFPURegisterValue64(n), \
+ GetFPURegisterValueDouble(n)
PrintAllRegs();
PrintF("\n\n");
// f0, f1, f2, ... f31.
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+ // This must be a compile-time switch,
+ // compiler will throw out warnings otherwise.
+ if (kFpuMode == kFP64) {
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(0) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(1) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(2) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(3) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(4) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(5) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(6) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(7) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(8) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(9) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(10));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(11));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(12));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(13));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(14));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(15));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(16));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(17));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(18));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(19));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(20));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(21));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(22));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(23));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(24));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(25));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(26));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(27));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(28));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(29));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(30));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(31));
+ } else {
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(0) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(2) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(4) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(6) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(8) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(10));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(12));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(14));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(16));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(18));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(20));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(22));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(24));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(26));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(28));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(30));
+ }
#undef REG_INFO
-#undef FPU_REG_INFO
+#undef FPU_REG_INFO32
+#undef FPU_REG_INFO64
}
@@ -397,8 +460,6 @@ void MipsDebugger::Debug() {
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
if (argc == 2) {
- int32_t value;
- float fvalue;
if (strcmp(arg1, "all") == 0) {
PrintAllRegs();
} else if (strcmp(arg1, "allf") == 0) {
@@ -408,24 +469,36 @@ void MipsDebugger::Debug() {
int fpuregnum = FPURegisters::Number(arg1);
if (regnum != kInvalidRegister) {
+ int32_t value;
value = GetRegisterValue(regnum);
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (fpuregnum != kInvalidFPURegister) {
- if (fpuregnum % 2 == 1) {
- value = GetFPURegisterValueInt(fpuregnum);
- fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ if (IsFp64Mode()) {
+ int64_t value;
+ double dvalue;
+ value = GetFPURegisterValue64(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016llx %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
} else {
- double dfvalue;
- int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
- int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
- dfvalue = GetFPURegisterValueDouble(fpuregnum);
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
- FPURegisters::Name(fpuregnum+1),
- FPURegisters::Name(fpuregnum),
- lvalue1,
- lvalue2,
- dfvalue);
+ if (fpuregnum % 2 == 1) {
+ int32_t value;
+ float fvalue;
+ value = GetFPURegisterValue32(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ double dfvalue;
+ int32_t lvalue1 = GetFPURegisterValue32(fpuregnum);
+ int32_t lvalue2 = GetFPURegisterValue32(fpuregnum + 1);
+ dfvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+ FPURegisters::Name(fpuregnum+1),
+ FPURegisters::Name(fpuregnum),
+ lvalue1,
+ lvalue2,
+ dfvalue);
+ }
}
} else {
PrintF("%s unrecognized\n", arg1);
@@ -439,7 +512,7 @@ void MipsDebugger::Debug() {
int fpuregnum = FPURegisters::Number(arg1);
if (fpuregnum != kInvalidFPURegister) {
- value = GetFPURegisterValueInt(fpuregnum);
+ value = GetFPURegisterValue32(fpuregnum);
fvalue = GetFPURegisterValueFloat(fpuregnum);
PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
} else {
@@ -489,15 +562,28 @@ void MipsDebugger::Debug() {
next_arg++;
}
- int32_t words;
- if (argc == next_arg) {
- words = 10;
+ // TODO(palfia): optimize this.
+ if (IsFp64Mode()) {
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
} else {
- if (!GetValue(argv[next_arg], &words)) {
+ int32_t words;
+ if (argc == next_arg) {
words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
}
+ end = cur + words;
}
- end = cur + words;
while (cur < end) {
PrintF(" 0x%08x: 0x%08x %10d",
@@ -1012,21 +1098,47 @@ void Simulator::set_dw_register(int reg, const int* dbl) {
}
-void Simulator::set_fpu_register(int fpureg, int32_t value) {
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ DCHECK(IsFp64Mode());
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
}
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ // TODO(plind): big endian issue.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ *pword = value;
+}
+
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ // TODO(plind): big endian issue.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ *phiword = value;
+}
+
+
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
}
void Simulator::set_fpu_register_double(int fpureg, double value) {
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ if (IsFp64Mode()) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+ } else {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ int64_t i64 = bit_cast<int64_t>(value);
+ set_fpu_register_word(fpureg, i64 & 0xffffffff);
+ set_fpu_register_word(fpureg + 1, i64 >> 32);
+ }
}
@@ -1042,6 +1154,7 @@ int32_t Simulator::get_register(int reg) const {
double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
double dm_val = 0.0;
@@ -1054,29 +1167,48 @@ double Simulator::get_double_from_register_pair(int reg) {
}
-int32_t Simulator::get_fpu_register(int fpureg) const {
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ DCHECK(IsFp64Mode());
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
}
-int64_t Simulator::get_fpu_register_long(int fpureg) const {
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<int64_t*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
}
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *BitCast<float*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
}
double Simulator::get_fpu_register_double(int fpureg) const {
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ if (IsFp64Mode()) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<double*>(&FPUregisters_[fpureg]);
+ } else {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ int64_t i64;
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
+ i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
+ return bit_cast<double>(i64);
+ }
}
@@ -1089,6 +1221,7 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
*y = get_fpu_register_double(14);
*z = get_register(a2);
} else {
+ // TODO(plind): bad ABI stuff, refactor or remove.
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
@@ -1143,6 +1276,8 @@ bool Simulator::test_fcsr_bit(uint32_t cc) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
if (!std::isfinite(original) || !std::isfinite(rounded)) {
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1158,7 +1293,7 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
ret = true;
}
- if (rounded > INT_MAX || rounded < INT_MIN) {
+ if (rounded > max_int32 || rounded < min_int32) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1420,18 +1555,35 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- arg2 = get_fpu_register(f14);
- arg3 = get_fpu_register(f15);
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ arg2 = get_fpu_register_word(f14);
+ arg3 = get_fpu_register_hi_word(f14);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ arg2 = get_fpu_register_word(f14);
+ arg3 = get_fpu_register_word(f15);
+ }
break;
case ExternalReference::BUILTIN_FP_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ }
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ }
arg2 = get_register(a2);
break;
default:
@@ -1735,25 +1887,20 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
switch (op) {
case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // Handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- break;
case CFC1:
// At the moment only FCSR is supported.
DCHECK(fs_reg == kFCSRRegister);
*alu_out = FCSR_;
break;
case MFC1:
- *alu_out = get_fpu_register(fs_reg);
+ *alu_out = get_fpu_register_word(fs_reg);
break;
case MFHC1:
- UNIMPLEMENTED_MIPS();
+ *alu_out = get_fpu_register_hi_word(fs_reg);
break;
case CTC1:
case MTC1:
case MTHC1:
- // Do the store in the execution step.
- break;
case S:
case D:
case W:
@@ -1762,7 +1909,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Do everything in the execution step.
break;
default:
- UNIMPLEMENTED_MIPS();
+ // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here.
+ UNREACHABLE();
}
break;
case COP1X:
@@ -1810,17 +1958,51 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case SRAV:
*alu_out = rt >> rs;
break;
- case MFHI:
- *alu_out = get_register(HI);
+ case MFHI: // MFHI == CLZ on R6.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ DCHECK(instr->SaValue() == 0);
+ *alu_out = get_register(HI);
+ } else {
+ // MIPS spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ DCHECK(instr->SaValue() == 1);
+ *alu_out = base::bits::CountLeadingZeros32(rs_u);
+ }
break;
case MFLO:
*alu_out = get_register(LO);
break;
- case MULT:
- *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ case MULT: // MULT == MUL_MUH.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ case MUH_OP:
+ *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
- case MULTU:
- *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ case MULTU: // MULTU == MUL_MUH_U.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ *u64hilo = static_cast<uint64_t>(rs_u) *
+ static_cast<uint64_t>(rt_u);
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ case MUH_OP:
+ *u64hilo = static_cast<uint64_t>(rs_u) *
+ static_cast<uint64_t>(rt_u);
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case ADD:
if (HaveSameSign(rs, rt)) {
@@ -1909,9 +2091,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case CLZ:
// MIPS32 spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
- // GCC __builtin_clz: If input is 0, the result is undefined.
- *alu_out =
- rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+ *alu_out = base::bits::CountLeadingZeros32(rs_u);
break;
default:
UNREACHABLE();
@@ -1998,16 +2178,14 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
switch (op) {
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- UNREACHABLE();
- break;
case CFC1:
set_register(rt_reg, alu_out);
+ break;
case MFC1:
set_register(rt_reg, alu_out);
break;
case MFHC1:
- UNIMPLEMENTED_MIPS();
+ set_register(rt_reg, alu_out);
break;
case CTC1:
// At the moment only FCSR is supported.
@@ -2015,10 +2193,12 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
FCSR_ = registers_[rt_reg];
break;
case MTC1:
- FPUregisters_[fs_reg] = registers_[rt_reg];
+ // Hardware writes upper 32-bits to zero on mtc1.
+ set_fpu_register_hi_word(fs_reg, 0);
+ set_fpu_register_word(fs_reg, registers_[rt_reg]);
break;
case MTHC1:
- UNIMPLEMENTED_MIPS();
+ set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
break;
case S:
float f;
@@ -2027,20 +2207,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
f = get_fpu_register_float(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(f));
break;
- case CVT_W_S:
- case CVT_L_S:
- case TRUNC_W_S:
- case TRUNC_L_S:
- case ROUND_W_S:
- case ROUND_L_S:
- case FLOOR_W_S:
- case FLOOR_L_S:
- case CEIL_W_S:
- case CEIL_L_S:
- case CVT_PS_S:
- UNIMPLEMENTED_MIPS();
- break;
default:
+ // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+ // CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
UNREACHABLE();
}
break;
@@ -2114,9 +2283,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// round to the even one.
result--;
}
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
}
}
break;
@@ -2124,9 +2293,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
{
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
}
}
break;
@@ -2134,9 +2303,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
{
double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
}
}
break;
@@ -2144,9 +2313,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
{
double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
}
}
break;
@@ -2156,34 +2325,54 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
double rounded =
fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
}
case FLOOR_L_D: // Mips32r2 instruction.
i64 = static_cast<int64_t>(std::floor(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
case CEIL_L_D: // Mips32r2 instruction.
i64 = static_cast<int64_t>(std::ceil(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
case C_F_D:
UNIMPLEMENTED_MIPS();
@@ -2195,35 +2384,92 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case W:
switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
- alu_out = get_fpu_register(fs_reg);
+ alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- alu_out = get_fpu_register(fs_reg);
+ alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default:
+ default: // Mips64r6 CMP.S instructions unimplemented.
UNREACHABLE();
}
break;
case L:
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
switch (instr->FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want 2 32-bit vals
// to make a sign-64.
- i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
- i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
+ if (IsFp64Mode()) {
+ i64 = get_fpu_register(fs_reg);
+ } else {
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
+ i64 |= static_cast<int64_t>(
+ get_fpu_register_word(fs_reg + 1)) << 32;
+ }
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
UNIMPLEMENTED_MIPS();
break;
- default:
+ case CMP_AF: // Mips64r6 CMP.D instructions.
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED.
UNREACHABLE();
}
break;
- case PS:
- break;
default:
UNREACHABLE();
}
@@ -2263,30 +2509,100 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
// Instructions using HI and LO registers.
case MULT:
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ if (!IsMipsArchVariant(kMips32r6)) {
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ set_register(rd_reg,
+ static_cast<int32_t>(i64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case MULTU:
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ if (!IsMipsArchVariant(kMips32r6)) {
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ set_register(rd_reg,
+ static_cast<int32_t>(u64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg, static_cast<int32_t>(u64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case DIV:
- // Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0
- // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
- // return INT_MIN which is what the hardware does.
- if (rs == INT_MIN && rt == -1) {
- set_register(LO, INT_MIN);
- set_register(HI, 0);
- } else if (rt != 0) {
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ if (IsMipsArchVariant(kMips32r6)) {
+ switch (instr->SaValue()) {
+ case DIV_OP:
+ if (rs == INT_MIN && rt == -1) {
+ set_register(rd_reg, INT_MIN);
+ } else if (rt != 0) {
+ set_register(rd_reg, rs / rt);
+ }
+ break;
+ case MOD_OP:
+ if (rs == INT_MIN && rt == -1) {
+ set_register(rd_reg, 0);
+ } else if (rt != 0) {
+ set_register(rd_reg, rs % rt);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } else {
+ // Divide by zero and overflow was not checked in the
+ // configuration step - div and divu do not raise exceptions. On
+ // division by 0 the result will be UNPREDICTABLE. On overflow
+ // (INT_MIN/-1), return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
}
break;
case DIVU:
- if (rt_u != 0) {
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ if (IsMipsArchVariant(kMips32r6)) {
+ switch (instr->SaValue()) {
+ case DIV_OP:
+ if (rt_u != 0) {
+ set_register(rd_reg, rs_u / rt_u);
+ }
+ break;
+ case MOD_OP:
+ if (rt_u != 0) {
+ set_register(rd_reg, rs_u % rt_u);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } else {
+ if (rt_u != 0) {
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ }
}
break;
// Break and trap instructions.
@@ -2368,6 +2684,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int16_t imm16 = instr->Imm16Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
+ int64_t ft;
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
@@ -2412,6 +2729,28 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
next_pc = current_pc + kBranchReturnOffset;
}
break;
+ case BC1EQZ:
+ ft = get_fpu_register(ft_reg);
+ do_branch = (ft & 0x1) ? false : true;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ case BC1NEZ:
+ ft = get_fpu_register(ft_reg);
+ do_branch = (ft & 0x1) ? true : false;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
default:
UNREACHABLE();
}
@@ -2646,14 +2985,15 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteW(addr, mem_value, instr);
break;
case LWC1:
- set_fpu_register(ft_reg, alu_out);
+ set_fpu_register_hi_word(ft_reg, 0);
+ set_fpu_register_word(ft_reg, alu_out);
break;
case LDC1:
set_fpu_register_double(ft_reg, fp_out);
break;
case SWC1:
addr = rs + se_imm16;
- WriteW(addr, get_fpu_register(ft_reg), instr);
+ WriteW(addr, get_fpu_register_word(ft_reg), instr);
break;
case SDC1:
addr = rs + se_imm16;
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 4c84b86db6..85f64779f1 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -162,11 +162,15 @@ class Simulator {
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
- void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
- int32_t get_fpu_register(int fpureg) const;
- int64_t get_fpu_register_long(int fpureg) const;
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
@@ -338,7 +342,9 @@ class Simulator {
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
- int32_t FPUregisters_[kNumFPURegisters];
+ // Note: FP32 mode uses only the lower 32-bit part of each element,
+ // the upper 32-bit is unpredictable.
+ int64_t FPUregisters_[kNumFPURegisters];
// FPU control register.
uint32_t FCSR_;
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 8d6807d267..5508ba626f 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,10 +1,5 @@
-plind44@gmail.com
paul.lind@imgtec.com
-gergely@homejinni.com
gergely.kis@imgtec.com
-palfia@homejinni.com
akos.palfi@imgtec.com
-kilvadyb@homejinni.com
balazs.kilvady@imgtec.com
-Dusan.Milosavljevic@rt-rk.com
dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index de294ee665..76dd801af5 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -99,6 +99,11 @@ int DoubleRegister::NumAllocatableRegisters() {
}
+int DoubleRegister::NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+}
+
+
int FPURegister::ToAllocationIndex(FPURegister reg) {
DCHECK(reg.code() % 2 == 0);
DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 36e9c2d105..5d51e6354b 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -318,7 +318,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 5c754f4950..b296d51758 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -211,6 +211,10 @@ struct FPURegister {
inline static int NumRegisters();
inline static int NumAllocatableRegisters();
+
+ // TODO(turbofan): Proper support for float32.
+ inline static int NumAllocatableAliasedRegisters();
+
inline static int ToAllocationIndex(FPURegister reg);
static const char* AllocationIndexToString(int index);
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index cbbcc054ff..51a1265604 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -12,8 +12,7 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -840,8 +839,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 970792aafa..ef497ccf5e 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -9,325 +9,82 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a1 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a3, a2, a1 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a3, a2, a1, a0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- UNIMPLEMENTED();
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- UNIMPLEMENTED();
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2, a3 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2, a1, a0 };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0, a1 };
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(entry));
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return cp; }
-
-
static void InitializeArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // cp -- context
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- allocation site with elements kind
Address deopt_handler = Runtime::FunctionForId(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, a1, a2 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, a1, a2, a0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // cp -- context
- // a0 -- number of arguments
- // a1 -- constructor function
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { cp, a1 };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { cp, a1, a0 };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a1, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a2, a1, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { cp, a1, a0 };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { cp, // context
- a1, // JSFunction
- a0, // actual number of arguments
- a2, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { cp, // context
- a2, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { cp, // context
- a2, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { cp, // context
- a0, // receiver
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { cp, // context
- a0, // callee
- a4, // call_data
- a2, // holder
- a1, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
@@ -348,25 +105,25 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
- a0.is(descriptor->GetEnvironmentParameterRegister(param_count - 1)));
+ a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments, adjust sp.
__ Dsubu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sd(descriptor->GetEnvironmentParameterRegister(i),
- MemOperand(sp, (param_count-1-i) * kPointerSize));
+ __ sd(descriptor.GetEnvironmentParameterRegister(i),
+ MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -374,107 +131,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Isolate* isolate,
- Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : PlatformCodeStub(isolate),
- result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() const { return ConvertToDouble; }
- int MinorKey() const {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
- Label not_special;
- // Convert from Smi to integer.
- __ SmiUntag(source_);
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ And(exponent, source_, Operand(HeapNumber::kSignMask));
- // Subtract from 0 if source was negative.
- __ subu(at, zero_reg, source_);
- __ Movn(source_, at, exponent);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, source_, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- // Safe to use 'at' as dest reg here.
- __ Or(at, exponent, Operand(exponent_word_for_1));
- __ Movn(exponent, at, source_); // Write exp when source not 0.
- // 1, 0 and -1 all have 0 for the second word.
- __ Ret(USE_DELAY_SLOT);
- __ mov(mantissa, zero_reg);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ Clz(zeros_, source_);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
- __ subu(mantissa, mantissa, zeros_);
- __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
- __ Or(exponent, exponent, mantissa);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ sllv(source_, source_, zeros_);
- // Compute lower part of fraction (last 12 bits).
- __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
- // And the top (top 20 bits).
- __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-
- __ Ret(USE_DELAY_SLOT);
- __ or_(exponent, exponent, source_);
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@@ -628,32 +284,32 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// We test for the special value that has a different exponent.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
// Test sign, and save for later conditionals.
- __ And(sign_, the_int_, Operand(0x80000000u));
- __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+ __ And(sign(), the_int(), Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(non_smi_exponent));
+ __ li(scratch(), Operand(non_smi_exponent));
// Set the sign bit in scratch_ if the value was negative.
- __ or_(scratch_, scratch_, sign_);
+ __ or_(scratch(), scratch(), sign());
// Subtract from 0 if the value was negative.
- __ subu(at, zero_reg, the_int_);
- __ Movn(the_int_, at, sign_);
+ __ subu(at, zero_reg, the_int());
+ __ Movn(the_int(), at, sign());
// We should be masking the implict first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ srl(at, the_int_, shift_distance);
- __ or_(scratch_, scratch_, at);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ __ srl(at, the_int(), shift_distance);
+ __ or_(scratch(), scratch(), at);
+ __ sw(scratch(), FieldMemOperand(the_heap_number(),
HeapNumber::kExponentOffset));
- __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ sll(scratch(), the_int(), 32 - shift_distance);
__ Ret(USE_DELAY_SLOT);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ __ sw(scratch(), FieldMemOperand(the_heap_number(),
HeapNumber::kMantissaOffset));
__ bind(&max_negative_int);
@@ -662,13 +318,13 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// The actual mantissa bits stored are all 0 because the implicit most
// significant 1 bit is not stored.
non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(scratch_, zero_reg);
+ __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+ __ mov(scratch(), zero_reg);
__ Ret(USE_DELAY_SLOT);
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ sw(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
}
@@ -934,15 +590,14 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
Register scratch,
- CompareIC::State expected,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
DONT_DO_SMI_CHECK);
@@ -956,14 +611,14 @@ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
// On entry a1 and a2 are the values to be compared.
// On exit a0 is 0, positive or negative to indicate the result of
// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = a1;
Register rhs = a0;
Condition cc = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
@@ -1086,29 +741,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
masm, lhs, rhs, &flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
a3);
if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- a2,
- a3,
- a4);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- a2,
- a3,
- a4,
- a5);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
+ a5);
}
// Never falls through to here.
@@ -1163,7 +808,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
@@ -1176,7 +821,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ MultiPopFPU(kCallerSavedFPU);
}
@@ -1187,7 +832,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
- const Register exponent = a2;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(a2));
const Register heapnumbermap = a5;
const Register heapnumber = v0;
const DoubleRegister double_base = f2;
@@ -1199,7 +845,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = a7;
Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
@@ -1227,7 +873,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -1235,7 +881,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
@@ -1248,7 +894,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
@@ -1317,7 +963,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
+ if (exponent_type() == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
@@ -1365,7 +1011,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Returning or bailing out.
Counters* counters = isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
@@ -1432,20 +1078,10 @@ void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(isolate, 1, mode);
- StoreBufferOverflowStub stub(isolate, mode);
- // These stubs might already be in the snapshot, detect that and don't
- // regenerate, which would lead to code stub initialization state being messed
- // up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
- save_doubles_code = *save_doubles.GetCode();
- }
- Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
- store_buffer_overflow_code = *stub.GetCode();
- }
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
@@ -1477,7 +1113,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
+ __ EnterExitFrame(save_doubles());
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -1564,7 +1200,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// sp: stack pointer
// fp: frame pointer
// s0: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -1591,7 +1227,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
@@ -1631,7 +1267,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// We build an EntryFrame.
__ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
__ li(a6, Operand(Smi::FromInt(marker)));
__ li(a5, Operand(Smi::FromInt(marker)));
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
@@ -1722,7 +1358,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// [ O32: 4 args slots]
// args
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate);
__ li(a4, Operand(construct_entry));
@@ -1949,7 +1585,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
a4, &miss);
__ bind(&miss);
@@ -1958,17 +1594,13 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() { return a0; }
-
-
-Register InstanceofStub::right() { return a1; }
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smiGenerateReadElement.
Label slow;
@@ -2262,6 +1894,32 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in ra.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+ __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
@@ -2529,9 +2187,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kStringEncodingMask == 4);
STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
- __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
- __ dsra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one_byte.
+ __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
+ __ dsra(a3, a0, 2); // a3 is 1 for one_byte, 0 for UC16 (used below).
__ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
@@ -2543,7 +2201,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(t9, &runtime);
// a1: previous index
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
// t9: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -2628,7 +2286,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
// For arguments 4 and 3 get string length, calculate start of string data
- // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ // and calculate the shift of the index (0 for one_byte and 1 for two byte).
__ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
// Load the length from the original subject string from the previous stack
@@ -2848,9 +2506,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into a4.
@@ -3051,7 +2709,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -3120,11 +2778,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
DCHECK(!a4.is(index_));
DCHECK(!a4.is(result_));
DCHECK(!a4.is(object_));
@@ -3195,13 +2848,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
+ arg_count(),
+ true,
+ CallAsMethod());
// Unreachable.
__ stop("Unexpected code address");
@@ -3214,7 +2867,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, a2);
@@ -3226,7 +2879,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Compute the receiver in sloppy mode.
__ ld(a3, MemOperand(sp, argc * kPointerSize));
@@ -3243,7 +2896,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(masm, argc, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -3271,7 +2924,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -3286,9 +2939,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Get the receiver of the function from the stack; 1 ~ return address.
- __ ld(a4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+ __ ld(a4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -3297,6 +2950,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ Push(a4, a1, a2, a3);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -3373,7 +3029,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
DCHECK(!a4.is(code_));
STATIC_ASSERT(kSmiTag == 0);
- DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ And(a4,
code_,
Operand(kSmiTagMask |
@@ -3382,7 +3038,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one_byte char code.
STATIC_ASSERT(kSmiTag == 0);
__ SmiScale(a4, code_, kPointerSizeLog2);
__ Daddu(result_, result_, a4);
@@ -3411,10 +3067,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
@@ -3458,57 +3111,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = seed + character + ((seed + character) << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ SmiUntag(hash);
- __ addu(hash, hash, character);
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ addu(hash, hash, character);
- // hash += hash << 10;
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ sll(at, hash, 3);
- __ addu(hash, hash, at);
- // hash ^= hash >> 11;
- __ srl(at, hash, 11);
- __ xor_(hash, hash, at);
- // hash += hash << 15;
- __ sll(at, hash, 15);
- __ addu(hash, hash, at);
-
- __ li(at, Operand(String::kHashBitMask));
- __ and_(hash, hash, at);
-
- // if (hash == 0) hash = 27;
- __ ori(at, zero_reg, StringHasher::kZeroHash);
- __ Movz(hash, at, hash);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
@@ -3633,7 +3235,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(a4, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
- __ AllocateAsciiSlicedString(v0, a2, a6, a7, &runtime);
+ __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
@@ -3676,8 +3278,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ And(a4, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(v0, a2, a4, a6, a7, &runtime);
+ // Allocate and copy the resulting one_byte string.
+ __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
// Locate first character of substring to copy.
__ Daddu(a5, a5, a3);
@@ -3735,12 +3337,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Register length = scratch1;
// Compare lengths.
@@ -3765,9 +3364,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3, v0,
- &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+ v0, &strings_not_equal);
// Characters are equal.
__ Ret(USE_DELAY_SLOT);
@@ -3775,13 +3373,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
@@ -3795,9 +3389,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4, v0,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, v0, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
@@ -3820,14 +3413,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Register scratch3,
Label* chars_not_equal) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
@@ -3875,13 +3463,13 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+ // Check that both objects are sequential one_byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first.
+ // Compare flat one_byte strings natively. Remove arguments from stack first.
__ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
__ Daddu(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, a4, a5);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
@@ -3911,13 +3499,13 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -3939,17 +3527,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(a1, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(a0, &miss);
}
@@ -4007,12 +3595,12 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
__ JumpIfSmi(a1, &unordered);
@@ -4022,7 +3610,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&unordered, eq, a1, Operand(at));
}
@@ -4032,8 +3620,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -4072,8 +3660,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
DCHECK(GetCondition() == eq);
Label miss;
@@ -4093,8 +3681,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss);
- __ JumpIfNotUniqueName(tmp2, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
// Use a0 as result
__ mov(v0, a0);
@@ -4116,11 +3704,11 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = a1;
@@ -4174,18 +3762,18 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&is_symbol);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one_byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one_byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+ tmp3);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
@@ -4202,8 +3790,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -4222,7 +3810,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
@@ -4239,7 +3827,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
@@ -4247,7 +3835,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ Push(ra, a1, a0);
- __ li(a4, Operand(Smi::FromInt(op_)));
+ __ li(a4, Operand(Smi::FromInt(op())));
__ daddiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
__ sd(a4, MemOperand(sp)); // In the delay slot.
@@ -4349,7 +3937,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
@@ -4521,12 +4109,12 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Stop if found the property.
__ Branch(&in_dictionary, eq, entry_key, Operand(key));
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -4534,7 +4122,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
}
@@ -4578,11 +4166,11 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
__ nop();
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
}
__ Ret();
@@ -4604,7 +4192,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -4624,10 +4212,10 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -4642,7 +4230,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
@@ -4658,7 +4246,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4686,10 +4274,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -4730,10 +4318,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
@@ -4821,7 +4409,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ ld(a1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Daddu(a1, a1, Operand(1));
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4831,6 +4419,20 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4869,7 +4471,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(s5, sp);
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
@@ -5047,7 +4649,7 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
@@ -5059,11 +4661,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -5073,7 +4675,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- a0 : argc (only if argument_count_ == ANY)
+ // -- a0 : argc (only if argument_count() == ANY)
// -- a1 : constructor
// -- a2 : AllocationSite or undefined
// -- sp[0] : return address
@@ -5208,9 +4810,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = a1;
Register context = cp;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -5296,7 +4898,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- a2 : api_function_address
// -----------------------------------
- Register api_function_address = a2;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(a2));
__ mov(a0, sp); // a0 = Handle<Name>
__ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index 73f19cde41..6c324bb85c 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -5,9 +5,6 @@
#ifndef V8_MIPS_CODE_STUBS_ARM_H_
#define V8_MIPS_CODE_STUBS_ARM_H_
-#include "src/ic-inl.h"
-
-
namespace v8 {
namespace internal {
@@ -15,24 +12,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
@@ -46,32 +25,26 @@ class StringHelper : public AllStatic {
Register scratch,
String::Encoding encoding);
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
+ // Compares two flat one-byte strings and returns result in v0.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
+ // Compares two flat one-byte strings for equality and returns result in v0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Register scratch3,
+ Label* chars_not_equal);
private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -81,92 +54,64 @@ class StoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return StoreRegistersState; }
- int MinorKey() const { return 0; }
- void Generate(MacroAssembler* masm);
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
+
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
- private:
- Major MajorKey() const { return RestoreRegistersState; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- // Compare two flat ASCII strings and returns result in v0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in v0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* chars_not_equal);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
-
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Isolate* isolate,
- Register the_int,
- Register the_heap_number,
- Register scratch,
+ WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+ Register the_heap_number, Register scratch,
Register scratch2)
- : PlatformCodeStub(isolate),
- the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch),
- sign_(scratch2) {
- DCHECK(IntRegisterBits::is_valid(the_int_.code()));
- DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
- DCHECK(ScratchRegisterBits::is_valid(scratch_.code()));
- DCHECK(SignRegisterBits::is_valid(sign_.code()));
+ : PlatformCodeStub(isolate) {
+ minor_key_ = IntRegisterBits::encode(the_int.code()) |
+ HeapNumberRegisterBits::encode(the_heap_number.code()) |
+ ScratchRegisterBits::encode(scratch.code()) |
+ SignRegisterBits::encode(scratch2.code());
+ DCHECK(IntRegisterBits::is_valid(the_int.code()));
+ DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
+ DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
+ DCHECK(SignRegisterBits::is_valid(scratch2.code()));
}
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
- Register sign_;
+ void Generate(MacroAssembler* masm);
+
+ Register the_int() const {
+ return Register::from_code(IntRegisterBits::decode(minor_key_));
+ }
+
+ Register the_heap_number() const {
+ return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+ }
+
+ Register scratch() const {
+ return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+ }
+
+ Register sign() const {
+ return Register::from_code(SignRegisterBits::decode(minor_key_));
+ }
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
@@ -174,16 +119,8 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class ScratchRegisterBits: public BitField<int, 8, 4> {};
class SignRegisterBits: public BitField<int, 12, 4> {};
- Major MajorKey() const { return WriteInt32ToHeapNumber; }
- int MinorKey() const {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code())
- | SignRegisterBits::encode(sign_.code());
- }
-
- void Generate(MacroAssembler* masm);
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};
@@ -196,16 +133,19 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -273,6 +213,8 @@ class RecordWriteStub: public PlatformCodeStub {
4 * Assembler::kInstrSize);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
@@ -337,7 +279,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- void Generate(MacroAssembler* masm);
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -345,18 +289,28 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 5> {};
@@ -365,13 +319,10 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
@@ -383,14 +334,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() const { return DirectCEntry; }
- int MinorKey() const { return 0; }
-
bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
@@ -399,9 +349,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -433,13 +383,12 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
-
- int MinorKey() const { return LookupModeBits::encode(mode_); }
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 8533ede55c..fb395f7561 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -948,18 +948,18 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(call_runtime, ne, at, Operand(zero_reg));
__ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
- Label ascii, done;
+ Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ And(at, result, Operand(kStringEncodingMask));
- __ Branch(&ascii, ne, at, Operand(zero_reg));
+ __ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
__ dsll(at, index, 1);
__ Daddu(at, string, at);
__ lhu(result, MemOperand(at));
__ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
+ __ bind(&one_byte);
+ // One-byte string.
__ Daddu(at, string, index);
__ lbu(result, MemOperand(at));
__ bind(&done);
diff --git a/deps/v8/src/mips64/codegen-mips64.h b/deps/v8/src/mips64/codegen-mips64.h
index 82a410ec23..b02ec4ff10 100644
--- a/deps/v8/src/mips64/codegen-mips64.h
+++ b/deps/v8/src/mips64/codegen-mips64.h
@@ -8,7 +8,7 @@
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips64/debug-mips64.cc b/deps/v8/src/mips64/debug-mips64.cc
index 0a091048de..831dc4e458 100644
--- a/deps/v8/src/mips64/debug-mips64.cc
+++ b/deps/v8/src/mips64/debug-mips64.cc
@@ -188,16 +188,16 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
@@ -211,9 +211,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-mips64.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 8d5bb2d2e5..2550b765b3 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -96,7 +96,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
diff --git a/deps/v8/src/mips64/full-codegen-mips64.cc b/deps/v8/src/mips64/full-codegen-mips64.cc
index c69ceccec7..1d4e63e8d5 100644
--- a/deps/v8/src/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/mips64/full-codegen-mips64.cc
@@ -14,15 +14,16 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
@@ -1042,7 +1043,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1173,7 +1175,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
__ li(a1, FeedbackVector());
- __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@@ -1315,9 +1317,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1337,6 +1337,24 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ ld(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ li(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ Label done;
+ __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1382,10 +1400,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadIC::NameRegister(), Operand(proxy->var()->name()));
+ __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
@@ -1473,10 +1491,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
- __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadIC::NameRegister(), Operand(var->name()));
+ __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
@@ -1686,10 +1704,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ mov(StoreIC::ValueRegister(), result_register());
- DCHECK(StoreIC::ValueRegister().is(a0));
- __ li(StoreIC::NameRegister(), Operand(key->value()));
- __ ld(StoreIC::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ DCHECK(StoreDescriptor::ValueRegister().is(a0));
+ __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1853,13 +1871,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1871,18 +1895,29 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = a1;
+ __ ld(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch, result_register());
+ }
+ break;
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in v0 and a1.
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
- __ ld(LoadIC::NameRegister(), MemOperand(sp, 0));
+ __ ld(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1903,6 +1938,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1949,6 +1988,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1963,12 +2005,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
@@ -1999,7 +2041,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ sd(a1, FieldMemOperand(result_register(),
@@ -2011,7 +2053,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -2020,8 +2062,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Branch(&l_next);
@@ -2077,10 +2119,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ld(load_receiver, MemOperand(sp, kPointerSize));
__ ld(load_name, MemOperand(sp, 2 * kPointerSize));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
@@ -2097,7 +2139,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
@@ -2110,7 +2152,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
@@ -2277,9 +2319,11 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ li(LoadIC::NameRegister(), Operand(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -2288,12 +2332,24 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has register arguments receiver and key.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallIC(ic);
} else {
@@ -2325,8 +2381,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpICStub stub(isolate(), op, mode);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2399,9 +2455,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ mov(a0, result_register());
__ pop(a1);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -2431,9 +2487,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ push(result_register()); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ mov(StoreIC::ReceiverRegister(), result_register());
- __ pop(StoreIC::ValueRegister()); // Restore value.
- __ li(StoreIC::NameRegister(),
+ __ mov(StoreDescriptor::ReceiverRegister(), result_register());
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
@@ -2442,11 +2498,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ push(result_register()); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(KeyedStoreIC::NameRegister(), result_register());
- __ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Move(StoreDescriptor::NameRegister(), result_register());
+ __ Pop(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2471,9 +2527,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreIC::ValueRegister(), result_register());
- __ li(StoreIC::NameRegister(), Operand(var->name()));
- __ ld(StoreIC::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ ld(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
@@ -2546,9 +2602,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(StoreIC::ValueRegister(), result_register());
- __ li(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2556,6 +2613,24 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // v0 : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(v0);
+ __ Push(key->value());
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
@@ -2566,13 +2641,11 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a0 is the value,
// - a1 is the key,
// - a2 is the receiver.
- __ mov(KeyedStoreIC::ValueRegister(), result_register());
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(a0));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(a0));
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2585,16 +2658,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), v0);
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), v0);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(v0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadIC::NameRegister(), v0);
- __ pop(LoadIC::ReceiverRegister());
+ __ Move(LoadDescriptor::NameRegister(), v0);
+ __ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(v0);
}
@@ -2612,12 +2692,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2628,7 +2707,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2641,6 +2721,42 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = a1;
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ mov(scratch, v0);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(scratch, v0, v0, scratch);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ sd(v0, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2651,8 +2767,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
- __ Move(LoadIC::NameRegister(), v0);
+ __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2661,11 +2777,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(at);
__ sd(v0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2807,13 +2923,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+ // super.x() is handled in EmitCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
} else {
DCHECK(call_type == Call::OTHER_CALL);
@@ -3314,7 +3437,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
+ __ LoadRoot(v0, Heap::kFunction_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3435,9 +3558,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = a1;
Register value = a2;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value);
if (FLAG_debug_code) {
@@ -3472,9 +3595,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = a1;
Register value = a2;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value);
if (FLAG_debug_code) {
@@ -3835,7 +3958,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
@@ -3885,7 +4008,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset));
array = no_reg; // End of array's live range.
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ mov(string_length, zero_reg);
__ Daddu(element,
@@ -3901,8 +4024,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
- array_length, Operand(zero_reg));
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
+ Operand(zero_reg));
}
__ bind(&loop);
__ ld(string, MemOperand(element));
@@ -3910,7 +4033,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ JumpIfSmi(string, &bailout);
__ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
@@ -3929,11 +4052,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (smi).
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
__ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
@@ -3961,12 +4084,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+ elements_end, &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
@@ -4005,7 +4124,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
@@ -4016,7 +4135,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
+ // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
__ sb(separator, MemOperand(result_pos));
@@ -4096,15 +4215,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
__ ld(receiver, GlobalObjectOperand());
__ ld(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver);
// Load the function from the receiver.
- __ li(LoadIC::NameRegister(), Operand(expr->name()));
+ __ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -4271,6 +4390,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -4287,13 +4411,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0));
+ __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize));
- __ ld(LoadIC::NameRegister(), MemOperand(sp, 0));
+ __ ld(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
}
}
@@ -4376,8 +4501,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4404,10 +4530,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreIC::ValueRegister(), result_register());
- __ li(StoreIC::NameRegister(),
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreIC::ReceiverRegister());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4420,11 +4546,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ mov(KeyedStoreIC::ValueRegister(), result_register());
- __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4446,10 +4572,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadIC::NameRegister(), Operand(proxy->name()));
+ __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
- __ li(LoadIC::SlotRegister(),
+ __ li(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4609,7 +4735,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
new file mode 100644
index 0000000000..8759bdd9bd
--- /dev/null
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -0,0 +1,303 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return a1; }
+const Register LoadDescriptor::NameRegister() { return a2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return a0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return a3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return a1; }
+const Register StoreDescriptor::NameRegister() { return a2; }
+const Register StoreDescriptor::ValueRegister() { return a0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
+
+
+const Register InstanceofDescriptor::left() { return a0; }
+const Register InstanceofDescriptor::right() { return a1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return a1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return a2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return a2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a3, a2, a1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a3, a2, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a3, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments
+ // a1 : the function to call
+ // a2 : feedback vector
+ // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, a0, a1, a2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // a0 -- number of arguments
+ // a1 -- function
+ // a2 -- allocation site with elements kind
+ Register registers[] = {cp, a1, a2};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, a1, a2, a0};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // a0 -- number of arguments
+ // a1 -- constructor function
+ Register registers[] = {cp, a1};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, a1, a0};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a2, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a2, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a0, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a1, // JSFunction
+ a0, // actual number of arguments
+ a2, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a0, // callee
+ a4, // call_data
+ a2, // holder
+ a1, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/mips64/lithium-codegen-mips64.cc
index 4d8d6afdf8..2ed9782ff0 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.cc
@@ -4,17 +4,19 @@
#include "src/v8.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -24,9 +26,9 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -49,11 +51,8 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
@@ -300,26 +299,21 @@ bool LCodeGen::GenerateDeferredCode() {
}
-bool LCodeGen::GenerateDeoptJumpTable() {
- if (deopt_jump_table_.length() > 0) {
+bool LCodeGen::GenerateJumpTable() {
+ if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label table_start;
__ bind(&table_start);
Label needs_frame;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ bind(&deopt_jump_table_[i].label);
- Address entry = deopt_jump_table_[i].address;
- Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
+ for (int i = 0; i < jump_table_.length(); i++) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (deopt_jump_table_[i].needs_frame) {
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ Branch(&needs_frame);
@@ -770,11 +764,11 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type,
- Register src1,
+ const char* detail, Register src1,
const Operand& src2) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
@@ -814,37 +808,36 @@ void LCodeGen::DeoptimizeIf(Condition condition,
__ bind(&skip);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+ __ Branch(&jump_table_.last().label, condition, src1, src2);
}
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Register src1,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, Register src1,
const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+ DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
}
@@ -852,7 +845,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -1074,7 +1067,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ dsubu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ dsubu(dividend, zero_reg, dividend);
@@ -1093,7 +1086,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1106,7 +1099,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
@@ -1125,7 +1118,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
@@ -1134,7 +1127,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@@ -1147,7 +1140,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
}
__ bind(&done);
}
@@ -1157,24 +1150,24 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1204,14 +1197,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1220,7 +1213,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Dmul(scratch0(), result, Operand(divisor));
__ Dsubu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
}
}
@@ -1238,14 +1231,14 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1254,7 +1247,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1266,7 +1259,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
__ dmod(remainder, dividend, divisor);
}
- DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
}
}
@@ -1311,14 +1304,14 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Dsubu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
}
__ Xor(scratch, scratch, result);
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, "overflow", result, Operand(kMaxInt));
}
return;
}
@@ -1346,14 +1339,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1397,14 +1390,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1413,7 +1406,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1450,14 +1443,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(gt, instr->environment(), scratch, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, "overflow", scratch, Operand(kMaxInt));
} else {
__ Dsubu(result, zero_reg, left);
}
@@ -1466,7 +1459,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
@@ -1481,18 +1474,18 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs)) {
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ dsll(result, left, shift);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Dsubu(result, zero_reg, result);
- } else if (IsPowerOf2(constant_abs - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ dsll(scratch, left, shift);
__ Daddu(result, scratch, left);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Dsubu(result, zero_reg, result);
- } else if (IsPowerOf2(constant_abs + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ dsll(scratch, left, shift);
__ Dsubu(result, scratch, left);
@@ -1521,7 +1514,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1536,10 +1529,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq,
- instr->environment(),
- result,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
__ bind(&done);
}
}
@@ -1603,8 +1593,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
// TODO(yy): (-1) >>> 0. anything else?
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
- DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
+ DeoptimizeIf(gt, instr, "negative value", result, Operand(kMaxInt));
}
break;
case Token::SHL:
@@ -1639,7 +1629,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
}
__ Move(result, left);
}
@@ -1695,12 +1685,10 @@ void LCodeGen::DoSubI(LSubI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr->environment(),
- ToRegister(result), Operand(kMaxInt));
- DeoptimizeIf(lt, instr->environment(),
- ToRegister(result), Operand(kMinInt));
+ DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
}
}
}
@@ -1755,9 +1743,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1892,13 +1880,11 @@ void LCodeGen::DoAddI(LAddI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
// if not smi, it must int32.
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr->environment(),
- ToRegister(result), Operand(kMaxInt));
- DeoptimizeIf(lt, instr->environment(),
- ToRegister(result), Operand(kMinInt));
+ DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
}
}
}
@@ -2012,8 +1998,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
@@ -2159,7 +2146,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
const Register map = scratch0();
@@ -2215,7 +2202,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, "unexpected object", zero_reg,
+ Operand(zero_reg));
}
}
}
@@ -2495,7 +2483,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2577,7 +2565,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2606,7 +2594,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ GetObjectType(temp, temp2, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
} else {
__ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -2672,15 +2660,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
@@ -2796,7 +2784,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2861,28 +2849,36 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ ld(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ li(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
+ __ li(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(LoadIC::NameRegister(), Operand(instr->name()));
+ __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ li(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(a0));
- __ li(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2903,7 +2899,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
}
// Store the value.
@@ -2921,7 +2917,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2945,7 +2941,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@@ -3015,21 +3011,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(v0));
// Name is always in a2.
- __ li(LoadIC::NameRegister(), Operand(instr->name()));
+ __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ li(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(a0));
- __ li(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3045,7 +3035,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@@ -3191,8 +3181,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
+ Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
@@ -3252,7 +3242,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
}
}
@@ -3306,10 +3296,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (hinstr->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
}
}
}
@@ -3371,20 +3361,14 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ li(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(a0));
- __ li(LoadIC::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3471,11 +3455,11 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr->environment(),
- scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
+ Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
@@ -3510,7 +3494,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+ DeoptimizeIf(hi, instr, "too many arguments", length,
+ Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3640,7 +3625,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
Label done;
Register exponent = scratch0();
@@ -3707,21 +3692,21 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ dsubu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
__ bind(&done);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3761,7 +3746,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
@@ -3769,7 +3755,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfhc1(scratch1, input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
@@ -3802,7 +3788,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr->environment(), scratch,
+ DeoptimizeIf(ge, instr, "overflow", scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@@ -3820,8 +3806,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr->environment(), result,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@@ -3840,7 +3825,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
@@ -3848,7 +3834,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ mfhc1(scratch, input); // Get exponent/sign bits.
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
}
__ bind(&done);
}
@@ -3897,10 +3883,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(f4));
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(a2));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(f2));
DCHECK(ToDoubleRegister(instr->result()).is(f0));
@@ -3909,10 +3896,11 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(a2, &no_deopt);
- __ ld(a7, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!a7.is(tagged_exponent));
+ __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a7, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", a7, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3978,6 +3966,34 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(a1));
+ DCHECK(name.is(a2));
+
+ Register scratch = a3;
+ Register extra = a4;
+ Register extra2 = a5;
+ Register extra3 = a6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
@@ -4199,10 +4215,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ li(StoreIC::NameRegister(), Operand(instr->name()));
+ __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4226,7 +4242,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment(), reg, operand);
+ DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
}
}
@@ -4463,13 +4479,12 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = (instr->strict_mode() == STRICT)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4518,7 +4533,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
ne, &no_memento_found);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr, "memento found");
__ bind(&no_memento_found);
}
@@ -4535,14 +4550,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4590,14 +4605,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4668,18 +4683,18 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4758,14 +4773,14 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4821,12 +4836,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@@ -4842,19 +4857,20 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4866,7 +4882,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -4874,14 +4890,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfhc1(scratch, result_reg); // Get exponent/sign bits.
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, instr, "minus zero", scratch,
+ Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
+ Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
@@ -4945,12 +4963,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
+ Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
// Load the double value.
__ ldc1(double_scratch,
@@ -4965,15 +4983,15 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
- // Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
}
}
__ bind(&done);
@@ -4981,14 +4999,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5027,11 +5045,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
@@ -5054,14 +5068,15 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
@@ -5087,14 +5102,15 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
@@ -5105,7 +5121,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
}
@@ -5113,7 +5129,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
}
@@ -5131,12 +5147,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
} else {
- DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
}
}
} else {
@@ -5144,14 +5160,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
+ Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
}
}
}
@@ -5166,11 +5182,9 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(at));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(object));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
}
}
@@ -5186,22 +5200,22 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5239,7 +5253,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
}
__ bind(&success);
@@ -5277,7 +5291,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr->environment(), input_reg,
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@@ -5316,14 +5330,14 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5495,9 +5509,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -5704,8 +5717,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
+ Operand(zero_reg));
}
@@ -5732,14 +5745,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5796,18 +5809,19 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+ DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
Register null_value = a5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+ DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+ DeoptimizeIf(le, instr, "not a JavaScript object", a1,
+ Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
@@ -5824,7 +5838,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+ DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
__ bind(&use_cache);
}
@@ -5844,7 +5858,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ld(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
__ bind(&done);
}
@@ -5854,7 +5868,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
}
@@ -5873,7 +5887,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5886,10 +5900,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.h b/deps/v8/src/mips64/lithium-codegen-mips64.h
index d42a10e04e..b320dcb817 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.h
@@ -25,7 +25,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -170,10 +170,10 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -229,14 +229,12 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Deoptimizer::BailoutType bailout_type,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::BailoutType bailout_type, const char* detail,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Register src1 = zero_reg,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
@@ -271,7 +269,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -298,12 +296,8 @@ class LCodeGen: public LCodeGenBase {
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -357,7 +351,7 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -365,8 +359,11 @@ class LCodeGen: public LCodeGenBase {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
@@ -384,7 +381,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
diff --git a/deps/v8/src/mips64/lithium-gap-resolver-mips64.h b/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
index 0072e526cb..9e6f14e5aa 100644
--- a/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
+++ b/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/mips64/lithium-mips64.cc
index 0b6b0ddb36..48926117bb 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/mips64/lithium-mips64.cc
@@ -430,12 +430,6 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -1087,15 +1081,15 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
- ops.Add(op, zone());
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ ops.Add(op, zone());
}
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
@@ -1104,6 +1098,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
@@ -1628,9 +1635,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), f4) :
- UseFixed(instr->right(), a2);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), f4)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, f0),
instr,
@@ -2049,11 +2057,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2108,10 +2116,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2174,11 +2183,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2235,9 +2245,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2294,7 +2305,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier || instr->field_representation().IsSmi()) {
+ if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
@@ -2311,8 +2322,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
@@ -2388,10 +2400,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2407,7 +2419,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
@@ -2504,6 +2516,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/mips64/lithium-mips64.h
index 77e5b9c38d..c6257a4cdc 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/mips64/lithium-mips64.h
@@ -151,6 +151,7 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -162,11 +163,11 @@ class LCodeGen;
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -286,7 +287,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -308,11 +309,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
@@ -327,8 +328,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const FINAL OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -364,11 +365,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -376,14 +377,14 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -392,7 +393,7 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -408,14 +409,14 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -424,25 +425,25 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -458,16 +459,16 @@ class LLabel V8_FINAL : public LGap {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -480,9 +481,30 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -494,7 +516,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -533,7 +555,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -548,7 +570,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -569,7 +591,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -583,11 +605,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -599,14 +621,14 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -624,7 +646,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -642,7 +664,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 3> {
public:
LModI(LOperand* left,
LOperand* right) {
@@ -658,7 +680,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -676,7 +698,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByConstI(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -694,7 +716,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -711,7 +733,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -730,7 +752,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -750,7 +772,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor) {
inputs_[0] = dividend;
@@ -765,7 +787,7 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -781,7 +803,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -798,13 +820,13 @@ class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -823,11 +845,11 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathFloor(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -842,7 +864,7 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -857,7 +879,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
@@ -867,7 +889,7 @@ class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -882,7 +904,7 @@ class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -894,7 +916,7 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
@@ -906,7 +928,7 @@ class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -928,7 +950,7 @@ class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -940,7 +962,7 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -954,7 +976,7 @@ class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -969,7 +991,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -982,7 +1004,7 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -998,7 +1020,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1015,7 +1037,7 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1028,11 +1050,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1043,11 +1065,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1061,11 +1083,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1083,11 +1105,11 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1099,11 +1121,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1116,7 +1138,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1129,11 +1151,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1147,11 +1169,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1170,7 +1192,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1186,7 +1208,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1207,7 +1229,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1216,7 +1238,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1231,7 +1253,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1248,7 +1270,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1269,7 +1291,7 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1284,7 +1306,7 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1293,7 +1315,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1302,7 +1324,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1311,7 +1333,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1322,7 +1344,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1333,7 +1355,7 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1344,11 +1366,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1365,7 +1387,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1377,7 +1399,7 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1396,7 +1418,7 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -1411,7 +1433,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -1432,7 +1454,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1447,7 +1469,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1462,7 +1484,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1477,7 +1499,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1489,18 +1511,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1517,16 +1539,16 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual Opcode opcode() const FINAL { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1549,7 +1571,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1562,7 +1584,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1581,7 +1603,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1594,7 +1616,7 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1603,7 +1625,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1633,7 +1655,7 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* vector) {
@@ -1653,14 +1675,14 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1681,7 +1703,7 @@ class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1696,7 +1718,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1713,7 +1735,7 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1728,11 +1750,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1744,7 +1766,7 @@ class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1757,7 +1779,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
@@ -1774,7 +1796,7 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1784,27 +1806,27 @@ class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1817,7 +1839,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -1828,49 +1850,48 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
- const InterfaceDescriptor* descriptor() { return descriptor_; }
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
- const InterfaceDescriptor* descriptor_;
+ CallInterfaceDescriptor descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1883,13 +1904,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1906,7 +1927,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1919,13 +1940,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1938,13 +1959,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -1955,7 +1976,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -1965,7 +1986,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1977,7 +1998,7 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1989,7 +2010,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2005,7 +2026,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2022,7 +2043,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2038,7 +2059,7 @@ class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -2054,7 +2075,7 @@ class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
@@ -2075,7 +2096,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2088,7 +2109,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2101,7 +2122,7 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2118,7 +2139,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2133,7 +2154,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2141,7 +2162,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2156,14 +2177,14 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2188,13 +2209,13 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
@@ -2214,13 +2235,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2238,7 +2259,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2249,7 +2270,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2265,7 +2286,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2283,7 +2304,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2300,7 +2321,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2315,7 +2336,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -2328,7 +2349,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2341,7 +2362,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
@@ -2354,7 +2375,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2366,7 +2387,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2379,7 +2400,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2393,7 +2414,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2405,7 +2426,7 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2419,7 +2440,7 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -2432,7 +2453,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -2446,7 +2467,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
LOperand* size,
@@ -2468,7 +2489,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2481,7 +2502,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2494,7 +2515,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2507,7 +2528,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2521,7 +2542,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2534,11 +2555,11 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2551,18 +2572,18 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2580,7 +2601,7 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2594,7 +2615,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2610,7 +2631,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2624,7 +2645,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2668,7 +2689,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2678,20 +2699,14 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator) { }
-
- Isolate* isolate() const { return graph_->isolate(); }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2725,24 +2740,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2784,7 +2781,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2828,10 +2825,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 87124dca14..9aa75700cb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -8,12 +8,13 @@
#if V8_TARGET_ARCH_MIPS64
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -211,8 +212,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(value, Operand(BitCast<int64_t>(kZapValue + 4)));
- li(dst, Operand(BitCast<int64_t>(kZapValue + 8)));
+ li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+ li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
}
@@ -286,8 +287,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
- li(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+ li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
@@ -361,8 +362,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(address, Operand(BitCast<int64_t>(kZapValue + 12)));
- li(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
@@ -400,8 +401,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Ret(eq, t8, Operand(zero_reg));
}
push(ra);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
@@ -3406,12 +3406,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3420,7 +3418,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
@@ -3429,11 +3427,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3452,11 +3447,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -3464,11 +3458,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3488,24 +3479,21 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -4869,7 +4857,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
DCHECK(stack_space >= 0);
Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
@@ -4967,7 +4955,7 @@ void MacroAssembler::AssertStackIsAligned() {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
andi(at, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -5301,71 +5289,59 @@ void MacroAssembler::LookupNumberStringCache(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
And(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatAsciiStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
- andi(scratch2, second, kFlatAsciiStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+ DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
+ andi(scratch2, second, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatAsciiStringMask));
- Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+ And(scratch, type, Operand(kFlatOneByteStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}
@@ -5431,7 +5407,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp.
mov(scratch, sp);
Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -5488,7 +5464,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
And(at, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -5790,8 +5766,8 @@ void MacroAssembler::EnsureNotWhite(
bind(&skip);
}
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@@ -6091,16 +6067,18 @@ void MacroAssembler::TruncatingDiv(Register result,
DCHECK(!dividend.is(result));
DCHECK(!dividend.is(at));
DCHECK(!result.is(at));
- MultiplierAndShift ms(divisor);
- li(at, Operand(ms.multiplier()));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ li(at, Operand(mag.multiplier));
Mulh(result, dividend, Operand(at));
- if (divisor > 0 && ms.multiplier() < 0) {
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
Addu(result, result, Operand(dividend));
}
- if (divisor < 0 && ms.multiplier() > 0) {
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
Subu(result, result, Operand(dividend));
}
- if (ms.shift() > 0) sra(result, result, ms.shift());
+ if (mag.shift > 0) sra(result, result, mag.shift);
srl(at, dividend, 31);
Addu(result, result, Operand(at));
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 89ae1e50fd..2da48fbd89 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -539,32 +539,25 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
@@ -1550,22 +1543,18 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch3,
Label* not_found);
- // Checks if both instance types are sequential ASCII strings and jumps to
+ // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential ASCII string and jump to label if
+ // Check if instance type is sequential one-byte string and jump to label if
// it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
Register index,
@@ -1573,21 +1562,20 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch,
uint32_t encoding_mask);
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Test that both first and second are sequential ASCII strings.
- // Check that they are non-smis.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_one_byte_strings);
void ClampUint8(Register output_reg, Register input_reg);
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
index bcd133424b..b20cab9825 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
@@ -277,7 +277,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_check;
@@ -401,7 +401,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ lbu(a3, MemOperand(a0, 0));
__ daddiu(a0, a0, char_size());
__ lbu(a4, MemOperand(a2, 0));
@@ -482,7 +482,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ li(a0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ And(a1, current_character(), Operand(kTableSize - 1));
__ Daddu(a0, a0, a1);
} else {
@@ -501,7 +501,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters.
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
@@ -518,12 +518,12 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// The emitted code for generic character classes is good enough.
return false;
case 'd':
- // Match ASCII digits ('0'..'9').
+ // Match Latin1 digits ('0'..'9').
__ Dsubu(a0, current_character(), Operand('0'));
BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
return true;
case 'D':
- // Match non ASCII-digits.
+ // Match non-Latin1-digits.
__ Dsubu(a0, current_character(), Operand('0'));
BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
return true;
@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
__ Xor(a0, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
__ Dsubu(a0, a0, Operand(0x0b));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
} else {
Label done;
@@ -562,8 +562,8 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
}
ExternalReference map = ExternalReference::re_word_character_map();
@@ -575,8 +575,8 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ Branch(&done, hi, current_character(), Operand('z'));
}
ExternalReference map = ExternalReference::re_word_character_map();
@@ -584,7 +584,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
__ Daddu(a0, a0, current_character());
__ lbu(a0, MemOperand(a0, 0));
BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -1092,7 +1092,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// Align the stack pointer and save the original sp value on the stack.
__ mov(scratch, sp);
__ Dsubu(sp, sp, Operand(kPointerSize));
- DCHECK(IsPowerOf2(stack_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
__ sd(scratch, MemOperand(sp));
@@ -1172,7 +1172,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1203,8 +1203,8 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1354,7 +1354,7 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
// must only be used to load a single character at a time.
DCHECK(characters == 1);
__ Daddu(t1, end_of_input_address(), Operand(offset));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ lbu(current_character(), MemOperand(t1, 0));
} else {
DCHECK(mode_ == UC16);
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
index 647e4150cb..dd4e8a93bc 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
@@ -240,7 +240,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (Latin1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index c07558465f..4c74939d43 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -12,8 +12,8 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/assembler.h"
+#include "src/base/bits.h"
#include "src/disasm.h"
-#include "src/globals.h" // Need the BitCast.
#include "src/mips64/constants-mips64.h"
#include "src/mips64/simulator-mips64.h"
#include "src/ostreams.h"
@@ -1056,13 +1056,13 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
}
void Simulator::set_fpu_register_double(int fpureg, double value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
}
@@ -1109,22 +1109,21 @@ int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
}
-uint32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<uint32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
}
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *BitCast<float*>(
- const_cast<int64_t*>(&FPUregisters_[fpureg]));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
}
double Simulator::get_fpu_register_double(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *BitCast<double*>(&FPUregisters_[fpureg]);
+ return *bit_cast<double*>(&FPUregisters_[fpureg]);
}
@@ -2074,10 +2073,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
} else {
// MIPS spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
- // GCC __builtin_clz: If input is 0, the result is undefined.
DCHECK(instr->SaValue() == 1);
- *alu_out =
- rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+ *alu_out = base::bits::CountLeadingZeros32(rs_u);
}
break;
case MFLO:
@@ -2220,9 +2217,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case CLZ:
// MIPS32 spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
- // GCC __builtin_clz: If input is 0, the result is undefined.
- *alu_out =
- rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+ *alu_out = base::bits::CountLeadingZeros32(rs_u);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 6bad6324a0..5241554be0 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -200,7 +200,7 @@ class Simulator {
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
- uint32_t get_fpu_register_hi_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 6da847fd54..c36d6fd720 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -87,6 +87,8 @@ function MakeMirror(value, opt_transient) {
mirror = new SetMirror(value);
} else if (ObjectIsPromise(value)) {
mirror = new PromiseMirror(value);
+ } else if (IS_GENERATOR(value)) {
+ mirror = new GeneratorMirror(value);
} else {
mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
}
@@ -161,6 +163,7 @@ var SCOPE_TYPE = 'scope';
var PROMISE_TYPE = 'promise';
var MAP_TYPE = 'map';
var SET_TYPE = 'set';
+var GENERATOR_TYPE = 'generator';
// Maximum length when sending strings through the JSON protocol.
var kMaxProtocolStringLength = 80;
@@ -177,9 +180,6 @@ PropertyType.Normal = 0;
PropertyType.Field = 1;
PropertyType.Constant = 2;
PropertyType.Callbacks = 3;
-PropertyType.Handler = 4;
-PropertyType.Interceptor = 5;
-PropertyType.Nonexistent = 6;
// Different attributes for a property.
@@ -217,6 +217,7 @@ var ScopeType = { Global: 0,
// - PromiseMirror
// - MapMirror
// - SetMirror
+// - GeneratorMirror
// - PropertyMirror
// - InternalPropertyMirror
// - FrameMirror
@@ -374,6 +375,15 @@ Mirror.prototype.isPromise = function() {
/**
+ * Check whether the mirror reflects a generator object.
+ * @returns {boolean} True if the mirror reflects a generator object
+ */
+Mirror.prototype.isGenerator = function() {
+ return this instanceof GeneratorMirror;
+};
+
+
+/**
* Check whether the mirror reflects a property.
* @returns {boolean} True if the mirror reflects a property
*/
@@ -989,8 +999,8 @@ FunctionMirror.prototype.script = function() {
* @return {Number or undefined} in-script position for the function
*/
FunctionMirror.prototype.sourcePosition_ = function() {
- // Return script if function is resolved. Otherwise just fall through
- // to return undefined.
+ // Return position if function is resolved. Otherwise just fall
+ // through to return undefined.
if (this.resolved()) {
return %FunctionGetScriptSourcePosition(this.value_);
}
@@ -1355,6 +1365,66 @@ SetMirror.prototype.values = function() {
/**
+ * Mirror object for a Generator object.
+ * @param {Object} data The Generator object
+ * @constructor
+ * @extends Mirror
+ */
+function GeneratorMirror(value) {
+ %_CallFunction(this, value, GENERATOR_TYPE, ObjectMirror);
+}
+inherits(GeneratorMirror, ObjectMirror);
+
+
+GeneratorMirror.prototype.status = function() {
+ var continuation = %GeneratorGetContinuation(this.value_);
+ if (continuation < 0) return "running";
+ if (continuation == 0) return "closed";
+ return "suspended";
+};
+
+
+GeneratorMirror.prototype.sourcePosition_ = function() {
+ return %GeneratorGetSourcePosition(this.value_);
+};
+
+
+GeneratorMirror.prototype.sourceLocation = function() {
+ var pos = this.sourcePosition_();
+ if (!IS_UNDEFINED(pos)) {
+ var script = this.func().script();
+ if (script) {
+ return script.locationFromPosition(pos, true);
+ }
+ }
+};
+
+
+GeneratorMirror.prototype.func = function() {
+ if (!this.func_) {
+ this.func_ = MakeMirror(%GeneratorGetFunction(this.value_));
+ }
+ return this.func_;
+};
+
+
+GeneratorMirror.prototype.context = function() {
+ if (!this.context_) {
+ this.context_ = new ContextMirror(%GeneratorGetContext(this.value_));
+ }
+ return this.context_;
+};
+
+
+GeneratorMirror.prototype.receiver = function() {
+ if (!this.receiver_) {
+ this.receiver_ = MakeMirror(%GeneratorGetReceiver(this.value_));
+ }
+ return this.receiver_;
+};
+
+
+/**
* Base mirror object for properties.
* @param {ObjectMirror} mirror The mirror object having this property
* @param {string} name The name of the property
@@ -1368,10 +1438,11 @@ function PropertyMirror(mirror, name, details) {
this.name_ = name;
this.value_ = details[0];
this.details_ = details[1];
- if (details.length > 2) {
- this.exception_ = details[2];
- this.getter_ = details[3];
- this.setter_ = details[4];
+ this.is_interceptor_ = details[2];
+ if (details.length > 3) {
+ this.exception_ = details[3];
+ this.getter_ = details[4];
+ this.setter_ = details[5];
}
}
inherits(PropertyMirror, Mirror);
@@ -1489,7 +1560,7 @@ PropertyMirror.prototype.setter = function() {
* UndefinedMirror if there is no setter for this property
*/
PropertyMirror.prototype.isNative = function() {
- return (this.propertyType() == PropertyType.Interceptor) ||
+ return this.is_interceptor_ ||
((this.propertyType() == PropertyType.Callbacks) &&
!this.hasGetter() && !this.hasSetter());
};
@@ -2541,6 +2612,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
case ERROR_TYPE:
case REGEXP_TYPE:
case PROMISE_TYPE:
+ case GENERATOR_TYPE:
// Add object representation.
this.serializeObject_(mirror, content, details);
break;
@@ -2670,6 +2742,21 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
}
+ if (mirror.isGenerator()) {
+ // Add generator specific properties.
+
+ // Either 'running', 'closed', or 'suspended'.
+ content.status = mirror.status();
+
+ content.func = this.serializeReference(mirror.func())
+ content.receiver = this.serializeReference(mirror.receiver())
+
+ // If the generator is suspended, add line/column properties to the content.
+ serializeLocationFields(mirror.sourceLocation(), content);
+
+ // TODO(wingo): Also serialize a reference to the context (scope chain).
+ }
+
if (mirror.isDate()) {
// Add date specific properties.
content.value = mirror.value();
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 37b174e207..eacd098234 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -84,19 +84,19 @@ class SnapshotWriter {
i::List<i::byte> startup_blob;
i::ListSnapshotSink sink(&startup_blob);
- int spaces[] = {
- i::NEW_SPACE, i::OLD_POINTER_SPACE, i::OLD_DATA_SPACE, i::CODE_SPACE,
- i::MAP_SPACE, i::CELL_SPACE, i::PROPERTY_CELL_SPACE
- };
+ int spaces[] = {i::NEW_SPACE, i::OLD_POINTER_SPACE,
+ i::OLD_DATA_SPACE, i::CODE_SPACE,
+ i::MAP_SPACE, i::CELL_SPACE,
+ i::PROPERTY_CELL_SPACE, i::LO_SPACE};
i::byte* snapshot_bytes = snapshot_data.begin();
sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
- for (size_t i = 0; i < ARRAY_SIZE(spaces); ++i)
+ for (size_t i = 0; i < arraysize(spaces); ++i)
sink.PutInt(serializer.CurrentAllocationAddress(spaces[i]), "spaces");
i::byte* context_bytes = context_snapshot_data.begin();
sink.PutBlob(context_bytes, context_snapshot_data.length(), "context");
- for (size_t i = 0; i < ARRAY_SIZE(spaces); ++i)
+ for (size_t i = 0; i < arraysize(spaces); ++i)
sink.PutInt(context_serializer.CurrentAllocationAddress(spaces[i]),
"spaces");
@@ -197,6 +197,7 @@ class SnapshotWriter {
WriteSizeVar(ser, prefix, "map", i::MAP_SPACE);
WriteSizeVar(ser, prefix, "cell", i::CELL_SPACE);
WriteSizeVar(ser, prefix, "property_cell", i::PROPERTY_CELL_SPACE);
+ WriteSizeVar(ser, prefix, "lo", i::LO_SPACE);
fprintf(fp_, "\n");
}
@@ -303,11 +304,6 @@ void DumpException(Handle<Message> message) {
int main(int argc, char** argv) {
- V8::InitializeICU();
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
- i::CpuFeatures::Probe(true);
-
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -319,6 +315,13 @@ int main(int argc, char** argv) {
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
+
+ i::CpuFeatures::Probe(true);
+ V8::InitializeICU();
+ v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+ v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
+
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor natives_decompressor;
int bz2_result = natives_decompressor.Decompress();
@@ -329,10 +332,11 @@ int main(int argc, char** argv) {
#endif
i::FLAG_logfile_per_isolate = false;
- Isolate* isolate = v8::Isolate::New();
+ Isolate::CreateParams params;
+ params.enable_serializer = true;
+ Isolate* isolate = v8::Isolate::New(params);
{ Isolate::Scope isolate_scope(isolate);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->enable_serializer();
Persistent<Context> context;
{
diff --git a/deps/v8/src/msan.h b/deps/v8/src/msan.h
index 4130d22a65..f099595e54 100644
--- a/deps/v8/src/msan.h
+++ b/deps/v8/src/msan.h
@@ -17,12 +17,17 @@
# define MEMORY_SANITIZER
#endif
-#if defined(MEMORY_SANITIZER) && !defined(USE_SIMULATOR)
+#if defined(MEMORY_SANITIZER)
# include <sanitizer/msan_interface.h> // NOLINT
-// Marks a memory range as fully initialized.
-# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s) __msan_unpoison((p), (s))
+
+// Marks a memory range as uninitialized, as if it was allocated here.
+# define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s) \
+ __msan_allocated_memory((p), (s))
+// Marks a memory range as initialized.
+#define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
#else
-# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s)
+# define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s)
+#define MSAN_MEMORY_IS_INITIALIZED(p, s)
#endif
#endif // V8_MSAN_H_
diff --git a/deps/v8/src/natives-external.cc b/deps/v8/src/natives-external.cc
index dfe3f82650..fc6614949c 100644
--- a/deps/v8/src/natives-external.cc
+++ b/deps/v8/src/natives-external.cc
@@ -34,7 +34,9 @@ class NativesStore {
int GetIndex(const char* name) {
for (int i = 0; i < native_names_.length(); ++i) {
- if (strcmp(name, native_names_[i].start()) == 0) {
+ int native_name_length = native_names_[i].length();
+ if ((static_cast<int>(strlen(name)) == native_name_length) &&
+ (strncmp(name, native_names_[i].start(), native_name_length) == 0)) {
return i;
}
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 4834ef2033..1d5af5b9e7 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -371,9 +371,9 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
if (!is_the_hole(i)) {
double value = get_scalar(i);
CHECK(!std::isnan(value) ||
- (BitCast<uint64_t>(value) ==
- BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
- ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
+ (bit_cast<uint64_t>(value) ==
+ bit_cast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+ ((bit_cast<uint64_t>(value) & Double::kSignMask) != 0));
}
}
}
@@ -547,7 +547,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK(HasFastSmiElements());
+ CHECK_EQ(FAST_HOLEY_SMI_ELEMENTS, GetElementsKind());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -752,19 +752,21 @@ void JSRegExp::JSRegExpVerify() {
bool is_native = RegExpImpl::UsesNativeRegExp();
FixedArray* arr = FixedArray::cast(data());
- Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
+ Object* one_byte_data = arr->get(JSRegExp::kIrregexpLatin1CodeIndex);
// Smi : Not compiled yet (-1) or code prepared for flushing.
// JSObject: Compilation error.
// Code/ByteArray: Compiled code.
- CHECK(ascii_data->IsSmi() ||
- (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
+ CHECK(
+ one_byte_data->IsSmi() ||
+ (is_native ? one_byte_data->IsCode() : one_byte_data->IsByteArray()));
Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
CHECK(uc16_data->IsSmi() ||
(is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
- Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
- CHECK(ascii_saved->IsSmi() || ascii_saved->IsString() ||
- ascii_saved->IsCode());
+ Object* one_byte_saved =
+ arr->get(JSRegExp::kIrregexpLatin1CodeSavedIndex);
+ CHECK(one_byte_saved->IsSmi() || one_byte_saved->IsString() ||
+ one_byte_saved->IsCode());
Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
CHECK(uc16_saved->IsSmi() || uc16_saved->IsString() ||
uc16_saved->IsCode());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 7e1c5b1eb3..e46dd8e7dd 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -13,6 +13,7 @@
#define V8_OBJECTS_INL_H_
#include "src/base/atomicops.h"
+#include "src/base/bits.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/elements.h"
@@ -30,6 +31,7 @@
#include "src/property.h"
#include "src/prototype.h"
#include "src/transitions-inl.h"
+#include "src/type-feedback-vector-inl.h"
#include "src/v8memory.h"
namespace v8 {
@@ -251,7 +253,7 @@ bool Object::IsExternalString() const {
}
-bool Object::IsExternalAsciiString() const {
+bool Object::IsExternalOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsOneByteRepresentation();
@@ -431,7 +433,7 @@ STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
-bool StringShape::IsSequentialAscii() {
+bool StringShape::IsSequentialOneByte() {
return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
@@ -441,15 +443,15 @@ bool StringShape::IsSequentialTwoByte() {
}
-bool StringShape::IsExternalAscii() {
+bool StringShape::IsExternalOneByte() {
return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
- Internals::kExternalAsciiRepresentationTag);
+ Internals::kExternalOneByteRepresentationTag);
-STATIC_ASSERT(v8::String::ASCII_ENCODING == kOneByteStringTag);
+STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() {
@@ -464,7 +466,7 @@ STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
uc32 FlatStringReader::Get(int index) {
DCHECK(0 <= index && index <= length_);
- if (is_ascii_) {
+ if (is_one_byte_) {
return static_cast<const byte*>(start_)[index];
} else {
return static_cast<const uc16*>(start_)[index];
@@ -499,7 +501,7 @@ class SequentialStringKey : public HashTableKey {
explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
- virtual uint32_t Hash() V8_OVERRIDE {
+ virtual uint32_t Hash() OVERRIDE {
hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
string_.length(),
seed_);
@@ -510,7 +512,7 @@ class SequentialStringKey : public HashTableKey {
}
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
@@ -525,29 +527,25 @@ class OneByteStringKey : public SequentialStringKey<uint8_t> {
OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
: SequentialStringKey<uint8_t>(str, seed) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->IsOneByteEqualTo(string_);
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
};
-template<class Char>
-class SubStringKey : public HashTableKey {
+class SeqOneByteSubStringKey : public HashTableKey {
public:
- SubStringKey(Handle<String> string, int from, int length)
+ SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
: string_(string), from_(from), length_(length) {
- if (string_->IsSlicedString()) {
- string_ = Handle<String>(Unslice(*string_, &from_));
- }
- DCHECK(string_->IsSeqString() || string->IsExternalString());
+ DCHECK(string_->IsSeqOneByteString());
}
- virtual uint32_t Hash() V8_OVERRIDE {
+ virtual uint32_t Hash() OVERRIDE {
DCHECK(length_ >= 0);
DCHECK(from_ + length_ <= string_->length());
- const Char* chars = GetChars() + from_;
+ const uint8_t* chars = string_->GetChars() + from_;
hash_field_ = StringHasher::HashSequentialString(
chars, length_, string_->GetHeap()->HashSeed());
uint32_t result = hash_field_ >> String::kHashShift;
@@ -555,25 +553,15 @@ class SubStringKey : public HashTableKey {
return result;
}
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
- virtual bool IsMatch(Object* string) V8_OVERRIDE;
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+ virtual bool IsMatch(Object* string) OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
private:
- const Char* GetChars();
- String* Unslice(String* string, int* offset) {
- while (string->IsSlicedString()) {
- SlicedString* sliced = SlicedString::cast(string);
- *offset += sliced->offset();
- string = sliced->parent();
- }
- return string;
- }
-
- Handle<String> string_;
+ Handle<SeqOneByteString> string_;
int from_;
int length_;
uint32_t hash_field_;
@@ -585,11 +573,11 @@ class TwoByteStringKey : public SequentialStringKey<uc16> {
explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
: SequentialStringKey<uc16>(str, seed) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->IsTwoByteEqualTo(string_);
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
};
@@ -599,11 +587,11 @@ class Utf8StringKey : public HashTableKey {
explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->IsUtf8EqualTo(string_);
}
- virtual uint32_t Hash() V8_OVERRIDE {
+ virtual uint32_t Hash() OVERRIDE {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
uint32_t result = hash_field_ >> String::kHashShift;
@@ -611,11 +599,11 @@ class Utf8StringKey : public HashTableKey {
return result;
}
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
if (hash_field_ == 0) Hash();
return isolate->factory()->NewInternalizedStringFromUtf8(
string_, chars_, hash_field_);
@@ -721,6 +709,9 @@ bool Object::IsTransitionArray() const {
}
+bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+
+
bool Object::IsDeoptimizationInputData() const {
// Must be a fixed array.
if (!IsFixedArray()) return false;
@@ -731,19 +722,9 @@ bool Object::IsDeoptimizationInputData() const {
// the entry size.
int length = FixedArray::cast(this)->length();
if (length == 0) return true;
- if (length < DeoptimizationInputData::kFirstDeoptEntryIndex) return false;
-
- FixedArray* self = FixedArray::cast(const_cast<Object*>(this));
- int deopt_count =
- Smi::cast(self->get(DeoptimizationInputData::kDeoptEntryCountIndex))
- ->value();
- int patch_count =
- Smi::cast(
- self->get(
- DeoptimizationInputData::kReturnAddressPatchEntryCountIndex))
- ->value();
- return length == DeoptimizationInputData::LengthFor(deopt_count, patch_count);
+ length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
+ return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
}
@@ -1496,21 +1477,22 @@ int HeapObject::Size() {
}
-bool HeapObject::MayContainNewSpacePointers() {
+bool HeapObject::MayContainRawValues() {
InstanceType type = map()->instance_type();
if (type <= LAST_NAME_TYPE) {
if (type == SYMBOL_TYPE) {
- return true;
+ return false;
}
DCHECK(type < FIRST_NONSTRING_TYPE);
// There are four string representations: sequential strings, external
// strings, cons strings, and sliced strings.
- // Only the latter two contain non-map-word pointers to heap objects.
- return ((type & kIsIndirectStringMask) == kIsIndirectStringTag);
+ // Only the former two contain raw values and no heap pointers (besides the
+ // map-word).
+ return ((type & kIsIndirectStringMask) != kIsIndirectStringTag);
}
- // The ConstantPoolArray contains heap pointers, but not new space pointers.
- if (type == CONSTANT_POOL_ARRAY_TYPE) return false;
- return (type > LAST_DATA_TYPE);
+ // The ConstantPoolArray contains heap pointers, but also raw values.
+ if (type == CONSTANT_POOL_ARRAY_TYPE) return true;
+ return (type <= LAST_DATA_TYPE);
}
@@ -2216,18 +2198,18 @@ void FixedArray::set(int index, Object* value) {
inline bool FixedDoubleArray::is_the_hole_nan(double value) {
- return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+ return bit_cast<uint64_t, double>(value) == kHoleNanInt64;
}
inline double FixedDoubleArray::hole_nan_as_double() {
- return BitCast<double, uint64_t>(kHoleNanInt64);
+ return bit_cast<double, uint64_t>(kHoleNanInt64);
}
inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
- DCHECK(BitCast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
- DCHECK((BitCast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
+ DCHECK(bit_cast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
+ DCHECK((bit_cast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
return base::OS::nan_value();
}
@@ -2920,9 +2902,6 @@ FixedArrayBase* Map::GetInitialElements() {
GetHeap()->EmptyFixedTypedArrayForMap(this);
DCHECK(!GetHeap()->InNewSpace(empty_array));
return empty_array;
- } else if (has_dictionary_elements()) {
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_slow_element_dictionary()));
- return GetHeap()->empty_slow_element_dictionary();
} else {
UNREACHABLE();
}
@@ -3076,27 +3055,6 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
}
-void DescriptorArray::Append(Descriptor* desc,
- const WhitenessWitness& witness) {
- DisallowHeapAllocation no_gc;
- int descriptor_number = number_of_descriptors();
- SetNumberOfDescriptors(descriptor_number + 1);
- Set(descriptor_number, desc, witness);
-
- uint32_t hash = desc->GetKey()->Hash();
-
- int insertion;
-
- for (insertion = descriptor_number; insertion > 0; --insertion) {
- Name* key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
- SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
- }
-
- SetSortedKey(insertion, descriptor_number);
-}
-
-
void DescriptorArray::Append(Descriptor* desc) {
DisallowHeapAllocation no_gc;
int descriptor_number = number_of_descriptors();
@@ -3140,7 +3098,7 @@ DescriptorArray::WhitenessWitness::~WhitenessWitness() {
template<typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::ComputeCapacity(int at_least_space_for) {
const int kMinCapacity = 32;
- int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+ int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
if (capacity < kMinCapacity) {
capacity = kMinCapacity; // Guarantee min capacity.
}
@@ -3211,7 +3169,7 @@ CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalAsciiString)
+CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalFloat32Array)
CAST_ACCESSOR(ExternalFloat64Array)
CAST_ACCESSOR(ExternalInt16Array)
@@ -3412,7 +3370,7 @@ uint16_t String::Get(int index) {
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
case kExternalStringTag | kOneByteStringTag:
- return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
+ return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
case kSlicedStringTag | kOneByteStringTag:
@@ -3478,7 +3436,7 @@ ConsString* String::VisitFlat(Visitor* visitor,
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- ExternalAsciiString::cast(string)->GetChars() + slice_offset,
+ ExternalOneByteString::cast(string)->GetChars() + slice_offset,
length - offset);
return NULL;
@@ -3616,12 +3574,12 @@ bool ExternalString::is_short() {
}
-const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
-void ExternalAsciiString::update_data_cache() {
+void ExternalOneByteString::update_data_cache() {
if (is_short()) return;
const char** data_field =
reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
@@ -3629,8 +3587,8 @@ void ExternalAsciiString::update_data_cache() {
}
-void ExternalAsciiString::set_resource(
- const ExternalAsciiString::Resource* resource) {
+void ExternalOneByteString::set_resource(
+ const ExternalOneByteString::Resource* resource) {
DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
*reinterpret_cast<const Resource**>(
FIELD_ADDR(this, kResourceOffset)) = resource;
@@ -3638,12 +3596,12 @@ void ExternalAsciiString::set_resource(
}
-const uint8_t* ExternalAsciiString::GetChars() {
+const uint8_t* ExternalOneByteString::GetChars() {
return reinterpret_cast<const uint8_t*>(resource()->data());
}
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
@@ -4306,8 +4264,8 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
- if (instance_type == ASCII_STRING_TYPE ||
- instance_type == ASCII_INTERNALIZED_STRING_TYPE) {
+ if (instance_type == ONE_BYTE_STRING_TYPE ||
+ instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
return SeqOneByteString::SizeFor(
reinterpret_cast<SeqOneByteString*>(this)->length());
}
@@ -4821,9 +4779,10 @@ int Code::profiler_ticks() {
void Code::set_profiler_ticks(int ticks) {
- DCHECK_EQ(FUNCTION, kind());
DCHECK(ticks < 256);
- WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+ if (kind() == FUNCTION) {
+ WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+ }
}
@@ -5445,7 +5404,7 @@ ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_vector, FixedArray,
+ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
kFeedbackVectorOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
@@ -5488,6 +5447,7 @@ BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
has_duplicate_parameters,
kHasDuplicateParameters)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
#if V8_HOST_ARCH_32_BIT
@@ -5604,6 +5564,19 @@ void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
}
+FunctionKind SharedFunctionInfo::kind() {
+ return FunctionKindBits::decode(compiler_hints());
+}
+
+
+void SharedFunctionInfo::set_kind(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ int hints = compiler_hints();
+ hints = FunctionKindBits::update(hints, kind);
+ set_compiler_hints(hints);
+}
+
+
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
kInlineBuiltin)
@@ -5615,8 +5588,10 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
+ kIsConciseMethod)
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -5629,7 +5604,7 @@ bool Script::HasValidSource() {
String* src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
if (src_str->IsOneByteRepresentation()) {
- return ExternalAsciiString::cast(src)->resource() != NULL;
+ return ExternalOneByteString::cast(src)->resource() != NULL;
} else if (src_str->IsTwoByteRepresentation()) {
return ExternalTwoByteString::cast(src)->resource() != NULL;
}
@@ -5697,8 +5672,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
- return code() !=
- GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
+ return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
}
@@ -5972,8 +5946,7 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() {
- return code() !=
- GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
+ return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
}
@@ -6069,8 +6042,8 @@ ACCESSORS(JSCollection, table, Object, kTableOffset)
}
ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Smi, kIndexOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Smi, kKindOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Object, kIndexOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Object, kKindOffset)
#undef ORDERED_HASH_TABLE_ITERATOR_ACCESSORS
@@ -7026,27 +6999,6 @@ void JSArray::SetContent(Handle<JSArray> array,
}
-Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->uninitialized_symbol();
-}
-
-
-Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->megamorphic_symbol();
-}
-
-
-Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
-}
-
-
-Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
- return heap->uninitialized_symbol();
-}
-
-
int TypeFeedbackInfo::ic_total_count() {
int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
return ICTotalCountField::decode(current);
@@ -7176,17 +7128,17 @@ void Foreign::ForeignIterateBody() {
}
-void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalAsciiStringResource Resource;
- v->VisitExternalAsciiString(
+void ExternalOneByteString::ExternalOneByteStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ v->VisitExternalOneByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
-template<typename StaticVisitor>
-void ExternalAsciiString::ExternalAsciiStringIterateBody() {
- typedef v8::String::ExternalAsciiStringResource Resource;
- StaticVisitor::VisitExternalAsciiString(
+template <typename StaticVisitor>
+void ExternalOneByteString::ExternalOneByteStringIterateBody() {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ StaticVisitor::VisitExternalOneByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 8fbe2182c5..d709a207c1 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -240,10 +240,6 @@ void JSObject::PrintProperties(OStream& os) { // NOLINT
os << Brief(descs->GetCallbacksObject(i)) << " (callback)\n";
break;
case NORMAL: // only in slow mode
- case HANDLER: // only in lookup results, not in descriptors
- case INTERCEPTOR: // only in lookup results, not in descriptors
- // There are no transitions in the descriptor array.
- case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -375,9 +371,6 @@ void JSObject::PrintTransitions(OStream& os) { // NOLINT
break;
// Values below are never in the target descriptor array.
case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -614,7 +607,7 @@ void Name::NamePrint(OStream& os) { // NOLINT
// This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ASCII characters
+// Since the string can also be in two-byte encoding, non-Latin1 characters
// will be ignored in the output.
char* String::ToAsciiArray() {
// Static so that subsequent calls frees previously allocated space.
@@ -758,7 +751,7 @@ void JSArrayBuffer::JSArrayBufferPrint(OStream& os) { // NOLINT
void JSTypedArray::JSTypedArrayPrint(OStream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSTypedArray");
os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - buffer =" << Brief(buffer());
+ os << " - buffer = " << Brief(buffer());
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
os << "\n - length = " << Brief(length());
@@ -1105,9 +1098,6 @@ void TransitionArray::PrintTransitions(OStream& os) { // NOLINT
break;
// Values below are never in the target descriptor array.
case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case NONEXISTENT:
UNREACHABLE();
break;
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index cfdb9ccb63..482b5beae8 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -8,6 +8,7 @@
#include "src/allocation-site-scopes.h"
#include "src/api.h"
#include "src/arguments.h"
+#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -23,6 +24,7 @@
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/hydrogen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/log.h"
#include "src/lookup.h"
@@ -104,35 +106,11 @@ bool Object::IsCallable() const {
}
-void Object::Lookup(Handle<Name> name, LookupResult* result) {
- DisallowHeapAllocation no_gc;
- Object* holder = NULL;
- if (IsJSReceiver()) {
- holder = this;
- } else {
- Context* native_context = result->isolate()->context()->native_context();
- if (IsNumber()) {
- holder = native_context->number_function()->instance_prototype();
- } else if (IsString()) {
- holder = native_context->string_function()->instance_prototype();
- } else if (IsSymbol()) {
- holder = native_context->symbol_function()->instance_prototype();
- } else if (IsBoolean()) {
- holder = native_context->boolean_function()->instance_prototype();
- } else {
- result->isolate()->PushStackTraceAndDie(
- 0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
- }
- }
- DCHECK(holder != NULL); // Cannot handle null or undefined.
- JSReceiver::cast(holder)->Lookup(name, result);
-}
-
-
MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
return JSProxy::GetPropertyWithHandler(it->GetHolder<JSProxy>(),
@@ -147,24 +125,53 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
case LookupIterator::ACCESS_CHECK:
if (it->HasAccess(v8::ACCESS_GET)) break;
return JSObject::GetPropertyWithFailedAccessCheck(it);
- case LookupIterator::PROPERTY:
- if (it->HasProperty()) {
- switch (it->property_kind()) {
- case LookupIterator::ACCESSOR:
- return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
- it->GetHolder<JSObject>(),
- it->GetAccessors());
- case LookupIterator::DATA:
- return it->GetDataValue();
- }
- }
- break;
+ case LookupIterator::ACCESSOR:
+ return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
+ it->GetHolder<JSObject>(),
+ it->GetAccessors());
+ case LookupIterator::DATA:
+ return it->GetDataValue();
}
}
return it->factory()->undefined_value();
}
+Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ LookupIterator it(object, key,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ return GetDataProperty(&it);
+}
+
+
+Handle<Object> JSObject::GetDataProperty(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess(v8::ACCESS_GET)) continue;
+ // Fall through.
+ case LookupIterator::JSPROXY:
+ it->NotFound();
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::ACCESSOR:
+ // TODO(verwaest): For now this doesn't call into
+ // ExecutableAccessorInfo, since clients don't need it. Update once
+ // relevant.
+ it->NotFound();
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::DATA:
+ return it->GetDataValue();
+ }
+ }
+ return it->isolate()->factory()->undefined_value();
+}
+
+
bool Object::ToInt32(int32_t* value) {
if (IsSmi()) {
*value = Smi::cast(this)->value();
@@ -397,7 +404,7 @@ MaybeHandle<Object> JSProxy::GetPropertyWithHandler(Handle<JSProxy> proxy,
Handle<Object> args[] = { receiver, name };
return CallTrap(
- proxy, "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
+ proxy, "get", isolate->derived_get_trap(), arraysize(args), args);
}
@@ -412,15 +419,11 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
Handle<Object> args[2] = { name, receiver };
- Handle<Object> error =
- isolate->factory()->NewTypeError("incompatible_method_receiver",
- HandleVector(args,
- ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
- }
- // TODO(rossberg): Handling symbols in the API requires changing the API,
- // so we do not support it for now.
- if (name->IsSymbol()) return isolate->factory()->undefined_value();
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("incompatible_method_receiver",
+ HandleVector(args, arraysize(args))),
+ Object);
+ }
if (structure->IsDeclaredAccessorInfo()) {
return GetDeclaredAccessorProperty(
receiver,
@@ -430,15 +433,14 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
Handle<ExecutableAccessorInfo> data =
Handle<ExecutableAccessorInfo>::cast(structure);
- v8::AccessorGetterCallback call_fun =
- v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+ v8::AccessorNameGetterCallback call_fun =
+ v8::ToCData<v8::AccessorNameGetterCallback>(data->getter());
if (call_fun == NULL) return isolate->factory()->undefined_value();
- Handle<String> key = Handle<String>::cast(name);
LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder);
v8::Handle<v8::Value> result =
- args.Call(call_fun, v8::Utils::ToLocal(key));
+ args.Call(call_fun, v8::Utils::ToLocal(name));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
return isolate->factory()->undefined_value();
@@ -488,23 +490,19 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
ExecutableAccessorInfo* info = ExecutableAccessorInfo::cast(*structure);
if (!info->IsCompatibleReceiver(*receiver)) {
Handle<Object> args[2] = { name, receiver };
- Handle<Object> error =
- isolate->factory()->NewTypeError("incompatible_method_receiver",
- HandleVector(args,
- ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
- }
- // TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return value;
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("incompatible_method_receiver",
+ HandleVector(args, arraysize(args))),
+ Object);
+ }
Object* call_obj = info->setter();
- v8::AccessorSetterCallback call_fun =
- v8::ToCData<v8::AccessorSetterCallback>(call_obj);
+ v8::AccessorNameSetterCallback call_fun =
+ v8::ToCData<v8::AccessorNameSetterCallback>(call_obj);
if (call_fun == NULL) return value;
- Handle<String> key = Handle<String>::cast(name);
LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
args.Call(call_fun,
- v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(name),
v8::Utils::ToLocal(value));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
@@ -519,10 +517,9 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
} else {
if (strict_mode == SLOPPY) return value;
Handle<Object> args[2] = { name, holder };
- Handle<Object> error =
- isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError("no_setter_in_callback", HandleVector(args, 2)),
+ Object);
}
}
@@ -568,19 +565,15 @@ MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
Handle<Object> argv[] = { value };
RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver,
- ARRAY_SIZE(argv), argv, true),
+ arraysize(argv), argv, true),
Object);
return value;
}
static bool FindAllCanReadHolder(LookupIterator* it) {
- it->skip_interceptor();
- it->skip_access_check();
for (; it->IsFound(); it->Next()) {
- if (it->state() == LookupIterator::PROPERTY &&
- it->HasProperty() &&
- it->property_kind() == LookupIterator::ACCESSOR) {
+ if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
@@ -618,11 +611,8 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
static bool FindAllCanWriteHolder(LookupIterator* it) {
- it->skip_interceptor();
- it->skip_access_check();
for (; it->IsFound(); it->Next()) {
- if (it->state() == LookupIterator::PROPERTY && it->HasProperty() &&
- it->property_kind() == LookupIterator::ACCESSOR) {
+ if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
if (AccessorInfo::cast(*accessors)->all_can_write()) return true;
@@ -648,47 +638,6 @@ MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
}
-Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
- DCHECK(!HasFastProperties());
- Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- if (IsGlobalObject()) {
- value = PropertyCell::cast(value)->value();
- }
- DCHECK(!value->IsPropertyCell() && !value->IsCell());
- return value;
-}
-
-
-Handle<Object> JSObject::GetNormalizedProperty(Handle<JSObject> object,
- const LookupResult* result) {
- DCHECK(!object->HasFastProperties());
- Isolate* isolate = object->GetIsolate();
- Handle<Object> value(object->property_dictionary()->ValueAt(
- result->GetDictionaryEntry()), isolate);
- if (object->IsGlobalObject()) {
- value = handle(Handle<PropertyCell>::cast(value)->value(), isolate);
- DCHECK(!value->IsTheHole());
- }
- DCHECK(!value->IsPropertyCell() && !value->IsCell());
- return value;
-}
-
-
-void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- const LookupResult* result,
- Handle<Object> value) {
- DCHECK(!object->HasFastProperties());
- NameDictionary* property_dictionary = object->property_dictionary();
- if (object->IsGlobalObject()) {
- Handle<PropertyCell> cell(PropertyCell::cast(
- property_dictionary->ValueAt(result->GetDictionaryEntry())));
- PropertyCell::SetValueInferType(cell, value);
- } else {
- property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value);
- }
-}
-
-
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
@@ -751,11 +700,11 @@ Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
// If we have a global object set the cell to the hole.
if (object->IsGlobalObject()) {
PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsDontDelete()) {
+ if (!details.IsConfigurable()) {
if (mode != FORCE_DELETION) return isolate->factory()->false_value();
// When forced to delete global properties, we have to make a
// map change to invalidate any ICs that think they can load
- // from the DontDelete cell without checking if it contains
+ // from the non-configurable cell without checking if it contains
// the hole value.
Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
DCHECK(new_map->is_dictionary_map());
@@ -1056,38 +1005,30 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kShortSize) return false;
Heap* heap = GetHeap();
- bool is_ascii = this->IsOneByteRepresentation();
+ bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
// Morph the string to an external string by replacing the map and
- // reinitializing the fields. This won't work if
- // - the space the existing string occupies is too small for a regular
- // external string.
- // - the existing string is in old pointer space and the backing store of
- // the external string is not aligned. The GC cannot deal with a field
- // containing a possibly unaligned address to outside of V8's heap.
- // In either case we resort to a short external string instead, omitting
+ // reinitializing the fields. This won't work if the space the existing
+ // string occupies is too small for a regular external string.
+ // Instead, we resort to a short external string instead, omitting
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
Map* new_map;
- if (size < ExternalString::kSize ||
- heap->old_pointer_space()->Contains(this)) {
+ if (size < ExternalString::kSize) {
new_map = is_internalized
- ? (is_ascii
- ? heap->
- short_external_internalized_string_with_one_byte_data_map()
- : heap->short_external_internalized_string_map())
- : (is_ascii
- ? heap->short_external_string_with_one_byte_data_map()
- : heap->short_external_string_map());
+ ? (is_one_byte
+ ? heap->short_external_internalized_string_with_one_byte_data_map()
+ : heap->short_external_internalized_string_map())
+ : (is_one_byte ? heap->short_external_string_with_one_byte_data_map()
+ : heap->short_external_string_map());
} else {
new_map = is_internalized
- ? (is_ascii
- ? heap->external_internalized_string_with_one_byte_data_map()
- : heap->external_internalized_string_map())
- : (is_ascii
- ? heap->external_string_with_one_byte_data_map()
- : heap->external_string_map());
+ ? (is_one_byte
+ ? heap->external_internalized_string_with_one_byte_data_map()
+ : heap->external_internalized_string_map())
+ : (is_one_byte ? heap->external_string_with_one_byte_data_map()
+ : heap->external_string_map());
}
// Byte size of the external String object.
@@ -1107,7 +1048,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
-bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
+bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
@@ -1134,25 +1075,20 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
bool is_internalized = this->IsInternalizedString();
// Morph the string to an external string by replacing the map and
- // reinitializing the fields. This won't work if
- // - the space the existing string occupies is too small for a regular
- // external string.
- // - the existing string is in old pointer space and the backing store of
- // the external string is not aligned. The GC cannot deal with a field
- // containing a possibly unaligned address to outside of V8's heap.
- // In either case we resort to a short external string instead, omitting
+ // reinitializing the fields. This won't work if the space the existing
+ // string occupies is too small for a regular external string.
+ // Instead, we resort to a short external string instead, omitting
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
Map* new_map;
- if (size < ExternalString::kSize ||
- heap->old_pointer_space()->Contains(this)) {
+ if (size < ExternalString::kSize) {
new_map = is_internalized
- ? heap->short_external_ascii_internalized_string_map()
- : heap->short_external_ascii_string_map();
+ ? heap->short_external_one_byte_internalized_string_map()
+ : heap->short_external_one_byte_string_map();
} else {
new_map = is_internalized
- ? heap->external_ascii_internalized_string_map()
- : heap->external_ascii_string_map();
+ ? heap->external_one_byte_internalized_string_map()
+ : heap->external_one_byte_string_map();
}
// Byte size of the external String object.
@@ -1163,7 +1099,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// the left-over space to avoid races with the sweeper thread.
this->synchronized_set_map(new_map);
- ExternalAsciiString* self = ExternalAsciiString::cast(this);
+ ExternalOneByteString* self = ExternalOneByteString::cast(this);
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
@@ -1192,16 +1128,16 @@ void String::StringShortPrint(StringStream* accumulator) {
len = kMaxShortPrintLength;
truncated = true;
}
- bool ascii = true;
+ bool one_byte = true;
for (int i = 0; i < len; i++) {
uint16_t c = stream.GetNext();
if (c < 32 || c >= 127) {
- ascii = false;
+ one_byte = false;
}
}
stream.Reset(this);
- if (ascii) {
+ if (one_byte) {
accumulator->Add("<String[%u]: ", length());
for (int i = 0; i < len; i++) {
accumulator->Put(static_cast<char>(stream.GetNext()));
@@ -1615,8 +1551,8 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
break;
case kExternalStringTag:
if ((type & kStringEncodingMask) == kOneByteStringTag) {
- reinterpret_cast<ExternalAsciiString*>(this)->
- ExternalAsciiStringIterateBody(v);
+ reinterpret_cast<ExternalOneByteString*>(this)
+ ->ExternalOneByteStringIterateBody(v);
} else {
reinterpret_cast<ExternalTwoByteString*>(this)->
ExternalTwoByteStringIterateBody(v);
@@ -1739,7 +1675,7 @@ void HeapNumber::HeapNumberPrint(OStream& os) { // NOLINT
String* JSReceiver::class_name() {
if (IsJSFunction() || IsJSFunctionProxy()) {
- return GetHeap()->function_class_string();
+ return GetHeap()->Function_string();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -1823,37 +1759,6 @@ MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
}
-void JSObject::AddFastProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode,
- TransitionFlag flag) {
- DCHECK(!object->IsJSGlobalProxy());
-
- MaybeHandle<Map> maybe_map;
- if (value->IsJSFunction()) {
- maybe_map = Map::CopyWithConstant(
- handle(object->map()), name, value, attributes, flag);
- } else if (!object->map()->TooManyFastProperties(store_mode)) {
- Isolate* isolate = object->GetIsolate();
- Representation representation = value->OptimalRepresentation();
- maybe_map = Map::CopyWithField(
- handle(object->map(), isolate), name,
- value->OptimalType(isolate, representation),
- attributes, representation, flag);
- }
-
- Handle<Map> new_map;
- if (!maybe_map.ToHandle(&new_map)) {
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
- return;
- }
-
- JSObject::MigrateToNewProperty(object, new_map, value);
-}
-
-
void JSObject::AddSlowProperty(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
@@ -1886,45 +1791,6 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
}
-MaybeHandle<Object> JSObject::AddPropertyInternal(
- Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes, JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check, TransitionFlag transition_flag) {
- DCHECK(!object->IsJSGlobalProxy());
- Isolate* isolate = object->GetIsolate();
-
- if (!name->IsUniqueName()) {
- name = isolate->factory()->InternalizeString(
- Handle<String>::cast(name));
- }
-
- if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
- !object->map()->is_extensible()) {
- Handle<Object> args[1] = {name};
- Handle<Object> error = isolate->factory()->NewTypeError(
- "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
- }
-
- if (object->HasFastProperties()) {
- AddFastProperty(object, name, value, attributes, store_mode,
- transition_flag);
- }
-
- if (!object->HasFastProperties()) {
- AddSlowProperty(object, name, value, attributes);
- }
-
- if (object->map()->is_observed() &&
- *name != isolate->heap()->hidden_string()) {
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(object, "add", name, old_value);
- }
-
- return value;
-}
-
-
Context* JSObject::GetCreationContext() {
Object* constructor = this->map()->constructor();
JSFunction* function;
@@ -1959,23 +1825,6 @@ void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
}
-static void ReplaceSlowProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- NameDictionary* dictionary = object->property_dictionary();
- int old_index = dictionary->FindEntry(name);
- int new_enumeration_index = 0; // 0 means "Use the next available index."
- if (old_index != -1) {
- // All calls to ReplaceSlowProperty have had all transitions removed.
- new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
- }
-
- PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- JSObject::SetNormalizedProperty(object, name, value, new_details);
-}
-
-
const char* Representation::Mnemonic() const {
switch (kind_) {
case kNone: return "v";
@@ -2097,9 +1946,21 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int total_size = number_of_fields + unused;
int external = total_size - inobject;
- if ((old_map->unused_property_fields() == 0) &&
- (number_of_fields != old_number_of_fields) &&
- (new_map->GetBackPointer() == *old_map)) {
+ if (number_of_fields != old_number_of_fields &&
+ new_map->GetBackPointer() == *old_map) {
+ PropertyDetails details = new_map->GetLastDescriptorDetails();
+
+ if (old_map->unused_property_fields() > 0) {
+ if (details.representation().IsDouble()) {
+ Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ FieldIndex index =
+ FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+ object->FastPropertyAtPut(index, *value);
+ }
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
DCHECK(number_of_fields == old_number_of_fields + 1);
// This migration is a transition from a map that has run out out property
// space. Therefore it could be done by extending the backing store.
@@ -2108,7 +1969,6 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
FixedArray::CopySize(old_storage, external);
// Properly initialize newly added property.
- PropertyDetails details = new_map->GetLastDescriptorDetails();
Handle<Object> value;
if (details.representation().IsDouble()) {
value = isolate->factory()->NewHeapNumber(0, MUTABLE);
@@ -2843,6 +2703,7 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> map) {
// static
Handle<Map> Map::Update(Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
return GeneralizeRepresentation(map, 0, Representation::None(),
HeapType::None(map->GetIsolate()),
ALLOW_AS_CONSTANT);
@@ -2901,9 +2762,6 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
break;
case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case NONEXISTENT:
UNREACHABLE();
}
}
@@ -2949,7 +2807,8 @@ MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
Handle<Object> value,
StrictMode strict_mode,
- StoreFromKeyed store_mode) {
+ StoreFromKeyed store_mode,
+ StorePropertyMode data_store_mode) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc(it->isolate());
@@ -3003,32 +2862,57 @@ MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
}
break;
- case LookupIterator::PROPERTY:
- if (!it->HasProperty()) break;
+ case LookupIterator::ACCESSOR:
if (it->property_details().IsReadOnly()) {
return WriteToReadOnlyProperty(it, value, strict_mode);
}
- switch (it->property_kind()) {
- case LookupIterator::ACCESSOR:
- if (it->HolderIsReceiverOrHiddenPrototype() ||
- !it->GetAccessors()->IsDeclaredAccessorInfo()) {
- return SetPropertyWithAccessor(it->GetReceiver(), it->name(),
- value, it->GetHolder<JSObject>(),
- it->GetAccessors(), strict_mode);
- }
- break;
- case LookupIterator::DATA:
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- return SetDataProperty(it, value);
- }
+ if (it->HolderIsReceiverOrHiddenPrototype() ||
+ !it->GetAccessors()->IsDeclaredAccessorInfo()) {
+ return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
+ it->GetHolder<JSObject>(),
+ it->GetAccessors(), strict_mode);
}
done = true;
break;
+
+ case LookupIterator::DATA:
+ if (it->property_details().IsReadOnly()) {
+ return WriteToReadOnlyProperty(it, value, strict_mode);
+ }
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ return SetDataProperty(it, value);
+ }
+ done = true;
+ break;
+
+ case LookupIterator::TRANSITION:
+ done = true;
+ break;
}
if (done) break;
}
+ // If the receiver is the JSGlobalObject, the store was contextual. In case
+ // the property did not exist yet on the global object itself, we have to
+ // throw a reference error in strict mode.
+ if (it->GetReceiver()->IsJSGlobalObject() && strict_mode == STRICT) {
+ Handle<Object> args[1] = {it->name()};
+ THROW_NEW_ERROR(it->isolate(),
+ NewReferenceError("not_defined", HandleVector(args, 1)),
+ Object);
+ }
+
+ if (data_store_mode == SUPER_PROPERTY) {
+ if (strict_mode == STRICT) {
+ Handle<Object> args[1] = {it->name()};
+ THROW_NEW_ERROR(it->isolate(),
+ NewReferenceError("not_defined", HandleVector(args, 1)),
+ Object);
+ }
+ return value;
+ }
+
return AddDataProperty(it, value, NONE, strict_mode, store_mode);
}
@@ -3039,14 +2923,15 @@ MaybeHandle<Object> Object::WriteToReadOnlyProperty(LookupIterator* it,
if (strict_mode != STRICT) return value;
Handle<Object> args[] = {it->name(), it->GetReceiver()};
- Handle<Object> error = it->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return it->isolate()->Throw<Object>(error);
+ THROW_NEW_ERROR(it->isolate(),
+ NewTypeError("strict_read_only_property",
+ HandleVector(args, arraysize(args))),
+ Object);
}
-MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
- Handle<Object> value) {
+Handle<Object> Object::SetDataProperty(LookupIterator* it,
+ Handle<Object> value) {
// Proxies are handled on the WithHandler path. Other non-JSObjects cannot
// have own properties.
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
@@ -3090,30 +2975,26 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// TODO(verwaest): Throw a TypeError with a more specific message.
return WriteToReadOnlyProperty(it, value, strict_mode);
}
- Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
+
+ Handle<JSObject> receiver = it->GetStoreTarget();
// If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
// instead. If the prototype is Null, the proxy is detached.
- if (receiver->IsJSGlobalProxy()) {
- // Trying to assign to a detached proxy.
- PrototypeIterator iter(it->isolate(), receiver);
- if (iter.IsAtEnd()) return value;
- receiver =
- Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter));
- }
+ if (receiver->IsJSGlobalProxy()) return value;
- if (!receiver->map()->is_extensible()) {
+ // Possibly migrate to the most up-to-date map that will be able to store
+ // |value| under it->name() with |attributes|.
+ it->PrepareTransitionToDataProperty(value, attributes, store_mode);
+ if (it->state() != LookupIterator::TRANSITION) {
if (strict_mode == SLOPPY) return value;
Handle<Object> args[1] = {it->name()};
- Handle<Object> error = it->factory()->NewTypeError(
- "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
- return it->isolate()->Throw<Object>(error);
+ THROW_NEW_ERROR(it->isolate(),
+ NewTypeError("object_not_extensible",
+ HandleVector(args, arraysize(args))),
+ Object);
}
-
- // Possibly migrate to the most up-to-date map that will be able to store
- // |value| under it->name() with |attributes|.
- it->TransitionToDataProperty(value, attributes, store_mode);
+ it->ApplyTransitionToDataProperty();
// TODO(verwaest): Encapsulate dictionary handling better.
if (receiver->map()->is_dictionary_map()) {
@@ -3508,77 +3389,6 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
}
-void JSObject::LookupOwnRealNamedProperty(Handle<Name> name,
- LookupResult* result) {
- DisallowHeapAllocation no_gc;
- if (IsJSGlobalProxy()) {
- PrototypeIterator iter(GetIsolate(), this);
- if (iter.IsAtEnd()) return result->NotFound();
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return JSObject::cast(iter.GetCurrent())
- ->LookupOwnRealNamedProperty(name, result);
- }
-
- if (HasFastProperties()) {
- map()->LookupDescriptor(this, *name, result);
- // A property or a map transition was found. We return all of these result
- // types because LookupOwnRealNamedProperty is used when setting
- // properties where map transitions are handled.
- DCHECK(!result->IsFound() ||
- (result->holder() == this && result->IsFastPropertyType()));
- return;
- }
-
- int entry = property_dictionary()->FindEntry(name);
- if (entry != NameDictionary::kNotFound) {
- Object* value = property_dictionary()->ValueAt(entry);
- if (IsGlobalObject()) {
- PropertyDetails d = property_dictionary()->DetailsAt(entry);
- if (d.IsDeleted() || PropertyCell::cast(value)->value()->IsTheHole()) {
- result->NotFound();
- return;
- }
- value = PropertyCell::cast(value)->value();
- }
- result->DictionaryResult(this, entry);
- return;
- }
-
- result->NotFound();
-}
-
-
-void JSObject::LookupRealNamedProperty(Handle<Name> name,
- LookupResult* result) {
- DisallowHeapAllocation no_gc;
- LookupOwnRealNamedProperty(name, result);
- if (result->IsFound()) return;
-
- LookupRealNamedPropertyInPrototypes(name, result);
-}
-
-
-void JSObject::LookupRealNamedPropertyInPrototypes(Handle<Name> name,
- LookupResult* result) {
- if (name->IsOwn()) {
- result->NotFound();
- return;
- }
-
- DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- for (PrototypeIterator iter(isolate, this); !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) {
- return result->HandlerResult(JSProxy::cast(iter.GetCurrent()));
- }
- JSObject::cast(iter.GetCurrent())->LookupOwnRealNamedProperty(name, result);
- DCHECK(!(result->IsFound() && result->type() == INTERCEPTOR));
- if (result->IsFound()) return;
- }
- result->NotFound();
-}
-
-
Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name) {
Isolate* isolate = proxy->GetIsolate();
@@ -3590,7 +3400,7 @@ Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result, CallTrap(proxy, "has", isolate->derived_has_trap(),
- ARRAY_SIZE(args), args),
+ arraysize(args), args),
Maybe<bool>());
return maybe(result->BooleanValue());
@@ -3613,7 +3423,7 @@ MaybeHandle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
CallTrap(proxy,
"set",
isolate->derived_set_trap(),
- ARRAY_SIZE(args),
+ arraysize(args),
args),
Object);
@@ -3641,7 +3451,7 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
CallTrap(proxy,
"getPropertyDescriptor",
Handle<Object>(),
- ARRAY_SIZE(args),
+ arraysize(args),
args),
Object);
@@ -3658,39 +3468,37 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Execution::Call(isolate,
isolate->to_complete_property_descriptor(),
result,
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv),
Object);
// [[GetProperty]] requires to check that all properties are configurable.
Handle<String> configurable_name =
isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("configurable_"));
+ STATIC_CHAR_VECTOR("configurable_"));
Handle<Object> configurable =
Object::GetProperty(desc, configurable_name).ToHandleChecked();
DCHECK(configurable->IsBoolean());
if (configurable->IsFalse()) {
- Handle<String> trap =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("getPropertyDescriptor"));
+ Handle<String> trap = isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("proxy_prop_not_configurable",
+ HandleVector(args, arraysize(args))),
+ Object);
}
DCHECK(configurable->IsTrue());
// Check for DataDescriptor.
Handle<String> hasWritable_name =
isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("hasWritable_"));
+ STATIC_CHAR_VECTOR("hasWritable_"));
Handle<Object> hasWritable =
Object::GetProperty(desc, hasWritable_name).ToHandleChecked();
DCHECK(hasWritable->IsBoolean());
if (hasWritable->IsTrue()) {
- Handle<String> writable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("writable_"));
+ Handle<String> writable_name = isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("writable_"));
Handle<Object> writable =
Object::GetProperty(desc, writable_name).ToHandleChecked();
DCHECK(writable->IsBoolean());
@@ -3698,14 +3506,14 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
if (!*done) return isolate->factory()->the_hole_value();
if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, receiver };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+ HandleVector(args, arraysize(args))),
+ Object);
}
// We have an AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("set_"));
+ Handle<String> set_name =
+ isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("set_"));
Handle<Object> setter = Object::GetProperty(desc, set_name).ToHandleChecked();
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
@@ -3715,9 +3523,9 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
if (strict_mode == SLOPPY) return value;
Handle<Object> args2[] = { name, proxy };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("no_setter_in_callback",
+ HandleVector(args2, arraysize(args2))),
+ Object);
}
@@ -3735,7 +3543,7 @@ MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
CallTrap(proxy,
"delete",
Handle<Object>(),
- ARRAY_SIZE(args),
+ arraysize(args),
args),
Object);
@@ -3743,11 +3551,11 @@ MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
if (mode == STRICT_DELETION && !result_bool) {
Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("delete"));
+ STATIC_CHAR_VECTOR("delete"));
Handle<Object> args[] = { handler, trap_name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("handler_failed",
+ HandleVector(args, arraysize(args))),
+ Object);
}
return isolate->factory()->ToBoolean(result_bool);
}
@@ -3774,7 +3582,7 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result,
proxy->CallTrap(proxy, "getPropertyDescriptor", Handle<Object>(),
- ARRAY_SIZE(args), args),
+ arraysize(args), args),
Maybe<PropertyAttributes>());
if (result->IsUndefined()) return maybe(ABSENT);
@@ -3784,31 +3592,31 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, desc,
Execution::Call(isolate, isolate->to_complete_property_descriptor(),
- result, ARRAY_SIZE(argv), argv),
+ result, arraysize(argv), argv),
Maybe<PropertyAttributes>());
// Convert result to PropertyAttributes.
Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("enumerable_"));
+ STATIC_CHAR_VECTOR("enumerable_"));
Handle<Object> enumerable;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, enumerable,
Object::GetProperty(desc, enum_n),
Maybe<PropertyAttributes>());
Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("configurable_"));
+ STATIC_CHAR_VECTOR("configurable_"));
Handle<Object> configurable;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, configurable,
Object::GetProperty(desc, conf_n),
Maybe<PropertyAttributes>());
Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("writable_"));
+ STATIC_CHAR_VECTOR("writable_"));
Handle<Object> writable;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, writable,
Object::GetProperty(desc, writ_n),
Maybe<PropertyAttributes>());
if (!writable->BooleanValue()) {
Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("set_"));
+ STATIC_CHAR_VECTOR("set_"));
Handle<Object> setter;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, setter,
Object::GetProperty(desc, set_n),
@@ -3819,11 +3627,12 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
if (configurable->IsFalse()) {
Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("getPropertyDescriptor"));
+ STATIC_CHAR_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error = isolate->factory()->NewTypeError(
+ "proxy_prop_not_configurable", HandleVector(args, arraysize(args)));
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
return maybe(NONE);
}
@@ -3883,9 +3692,10 @@ MaybeHandle<Object> JSProxy::CallTrap(Handle<JSProxy> proxy,
if (trap->IsUndefined()) {
if (derived.is_null()) {
Handle<Object> args[] = { handler, trap_name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("handler_trap_missing",
+ HandleVector(args, arraysize(args))),
+ Object);
}
trap = Handle<Object>(derived);
}
@@ -3943,45 +3753,6 @@ bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
}
-MaybeHandle<Object> JSObject::SetPropertyUsingTransition(
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Handle<Map> transition_map(lookup->GetTransitionTarget());
- int descriptor = transition_map->LastAdded();
-
- Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == CALLBACKS || attributes != details.attributes()) {
- // AddPropertyInternal will either normalize the object, or create a new
- // fast copy of the map. If we get a fast copy of the map, all field
- // representations will be tagged since the transition is omitted.
- return JSObject::AddPropertyInternal(
- object, name, value, attributes,
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
- JSReceiver::OMIT_EXTENSIBILITY_CHECK, OMIT_TRANSITION);
- }
-
- // Keep the target CONSTANT if the same value is stored.
- // TODO(verwaest): Also support keeping the placeholder
- // (value->IsUninitialized) as constant.
- if (!lookup->CanHoldValue(value)) {
- Representation field_representation = value->OptimalRepresentation();
- Handle<HeapType> field_type = value->OptimalType(
- lookup->isolate(), field_representation);
- transition_map = Map::GeneralizeRepresentation(
- transition_map, descriptor,
- field_representation, field_type, FORCE_FIELD);
- }
-
- JSObject::MigrateToNewProperty(object, transition_map, value);
- return value;
-}
-
-
void JSObject::MigrateToNewProperty(Handle<JSObject> object,
Handle<Map> map,
Handle<Object> value) {
@@ -4012,80 +3783,23 @@ void JSObject::WriteToField(int descriptor, Object* value) {
}
-void JSObject::SetPropertyToField(LookupResult* lookup, Handle<Object> value) {
- if (lookup->type() == CONSTANT || !lookup->CanHoldValue(value)) {
- Representation field_representation = value->OptimalRepresentation();
- Handle<HeapType> field_type = value->OptimalType(
- lookup->isolate(), field_representation);
- JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
- lookup->GetDescriptorIndex(),
- field_representation, field_type);
- }
- lookup->holder()->WriteToField(lookup->GetDescriptorIndex(), *value);
-}
-
-
-void JSObject::ConvertAndSetOwnProperty(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Handle<JSObject> object(lookup->holder());
- if (object->map()->TooManyFastProperties(Object::MAY_BE_STORE_FROM_KEYED)) {
- JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
- } else if (object->map()->is_prototype_map()) {
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0);
- }
-
- if (!object->HasFastProperties()) {
- ReplaceSlowProperty(object, name, value, attributes);
- ReoptimizeIfPrototype(object);
- return;
- }
-
- int descriptor_index = lookup->GetDescriptorIndex();
- if (lookup->GetAttributes() == attributes) {
- JSObject::GeneralizeFieldRepresentation(object, descriptor_index,
- Representation::Tagged(),
- HeapType::Any(lookup->isolate()));
- } else {
- Handle<Map> old_map(object->map());
- Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
- descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
- JSObject::MigrateToMap(object, new_map);
- }
-
- object->WriteToField(descriptor_index, *value);
-}
-
-
-void JSObject::SetPropertyToFieldWithAttributes(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- if (lookup->GetAttributes() == attributes) {
- if (value->IsUninitialized()) return;
- SetPropertyToField(lookup, value);
- } else {
- ConvertAndSetOwnProperty(lookup, name, value, attributes);
- }
-}
-
-
void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes) {
+ LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
#ifdef DEBUG
uint32_t index;
DCHECK(!object->IsJSProxy());
DCHECK(!name->AsArrayIndex(&index));
- LookupIterator it(object, name, LookupIterator::CHECK_OWN_REAL);
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
DCHECK(maybe.has_value);
DCHECK(!it.IsFound());
- DCHECK(object->map()->is_extensible());
+ DCHECK(object->map()->is_extensible() ||
+ name.is_identical_to(it.isolate()->factory()->hidden_string()));
#endif
- SetOwnPropertyIgnoreAttributes(object, name, value, attributes,
- OMIT_EXTENSIBILITY_CHECK).Check();
+ AddDataProperty(&it, value, attributes, STRICT,
+ CERTAINLY_NOT_STORE_FROM_KEYED).Check();
}
@@ -4096,160 +3810,130 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- ExtensibilityCheck extensibility_check,
- StoreFromKeyed store_from_keyed,
ExecutableAccessorInfoHandling handling) {
DCHECK(!value->IsTheHole());
- Isolate* isolate = object->GetIsolate();
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- LookupResult lookup(isolate);
- object->LookupOwn(name, &lookup, true);
- if (!lookup.IsFound()) {
- object->map()->LookupTransition(*object, *name, &lookup);
- }
-
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
- LookupIterator it(object, name, LookupIterator::CHECK_OWN);
- return SetPropertyWithFailedAccessCheck(&it, value, SLOPPY);
- }
- }
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return value;
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return SetOwnPropertyIgnoreAttributes(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), name,
- value, attributes, extensibility_check);
- }
+ LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ bool is_observed = object->map()->is_observed() &&
+ *name != it.isolate()->heap()->hidden_string();
+ for (; it.IsFound(); it.Next()) {
+ switch (it.state()) {
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
- if (lookup.IsInterceptor() ||
- (lookup.IsDescriptorOrDictionary() && lookup.type() == CALLBACKS)) {
- object->LookupOwnRealNamedProperty(name, &lookup);
- }
+ case LookupIterator::ACCESS_CHECK:
+ if (!it.isolate()->MayNamedAccess(object, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(&it, value, SLOPPY);
+ }
+ break;
- // Check for accessor in prototype chain removed here in clone.
- if (!lookup.IsFound()) {
- object->map()->LookupTransition(*object, *name, &lookup);
- TransitionFlag flag = lookup.IsFound()
- ? OMIT_TRANSITION : INSERT_TRANSITION;
- // Neither properties nor transitions found.
- return AddPropertyInternal(object, name, value, attributes,
- store_from_keyed, extensibility_check, flag);
- }
+ case LookupIterator::ACCESSOR: {
+ PropertyDetails details = it.property_details();
+ Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
+ // Ensure the context isn't changed after calling into accessors.
+ AssertNoContextChange ncc(it.isolate());
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- PropertyAttributes old_attributes = ABSENT;
- bool is_observed = object->map()->is_observed() &&
- *name != isolate->heap()->hidden_string();
- if (is_observed && lookup.IsProperty()) {
- if (lookup.IsDataProperty()) {
- old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
- }
- old_attributes = lookup.GetAttributes();
- }
-
- bool executed_set_prototype = false;
+ Handle<Object> accessors = it.GetAccessors();
- // Check of IsReadOnly removed from here in clone.
- if (lookup.IsTransition()) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- SetPropertyUsingTransition(
- handle(lookup.holder()), &lookup, name, value, attributes),
- Object);
- } else {
- switch (lookup.type()) {
- case NORMAL:
- ReplaceSlowProperty(object, name, value, attributes);
- break;
- case FIELD:
- SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
- break;
- case CONSTANT:
- // Only replace the constant if necessary.
- if (lookup.GetAttributes() != attributes ||
- *value != lookup.GetConstant()) {
- SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
+ if (is_observed && accessors->IsAccessorInfo()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ it.isolate(), old_value,
+ GetPropertyWithAccessor(it.GetReceiver(), it.name(),
+ it.GetHolder<JSObject>(), accessors),
+ Object);
}
- break;
- case CALLBACKS:
- {
- Handle<Object> callback(lookup.GetCallbackObject(), isolate);
- if (callback->IsExecutableAccessorInfo() &&
- handling == DONT_FORCE_FIELD) {
+
+ // Special handling for ExecutableAccessorInfo, which behaves like a
+ // data property.
+ if (handling == DONT_FORCE_FIELD &&
+ accessors->IsExecutableAccessorInfo()) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, JSObject::SetPropertyWithAccessor(
- object, name, value, handle(lookup.holder()),
- callback, STRICT),
+ it.isolate(), result,
+ JSObject::SetPropertyWithAccessor(it.GetReceiver(), it.name(),
+ value, it.GetHolder<JSObject>(),
+ accessors, STRICT),
Object);
-
- if (attributes != lookup.GetAttributes()) {
- Handle<ExecutableAccessorInfo> new_data =
- Accessors::CloneAccessor(
- isolate, Handle<ExecutableAccessorInfo>::cast(callback));
- new_data->set_property_attributes(attributes);
- if (attributes & READ_ONLY) {
- // This way we don't have to introduce a lookup to the setter,
- // simply make it unavailable to reflect the attributes.
- new_data->clear_setter();
+ DCHECK(result->SameValue(*value));
+
+ if (details.attributes() == attributes) {
+ // Regular property update if the attributes match.
+ if (is_observed && !old_value->SameValue(*value)) {
+ // If we are setting the prototype of a function and are
+ // observed, don't send change records because the prototype
+ // handles that itself.
+ if (!object->IsJSFunction() ||
+ !Name::Equals(it.isolate()->factory()->prototype_string(),
+ name) ||
+ !Handle<JSFunction>::cast(object)->should_have_prototype()) {
+ EnqueueChangeRecord(object, "update", name, old_value);
+ }
}
-
- SetPropertyCallback(object, name, new_data, attributes);
+ return value;
}
+
+ // Reconfigure the accessor if attributes mismatch.
+ Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
+ it.isolate(), Handle<ExecutableAccessorInfo>::cast(accessors));
+ new_data->set_property_attributes(attributes);
+ // By clearing the setter we don't have to introduce a lookup to
+ // the setter, simply make it unavailable to reflect the
+ // attributes.
+ if (attributes & READ_ONLY) new_data->clear_setter();
+ SetPropertyCallback(object, name, new_data, attributes);
if (is_observed) {
- // If we are setting the prototype of a function and are observed,
- // don't send change records because the prototype handles that
- // itself.
- executed_set_prototype = object->IsJSFunction() &&
- String::Equals(isolate->factory()->prototype_string(),
- Handle<String>::cast(name)) &&
- Handle<JSFunction>::cast(object)->should_have_prototype();
+ if (old_value->SameValue(*value)) {
+ old_value = it.isolate()->factory()->the_hole_value();
+ }
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
}
- } else {
- ConvertAndSetOwnProperty(&lookup, name, value, attributes);
+ return value;
}
- break;
- }
- case NONEXISTENT:
- case HANDLER:
- case INTERCEPTOR:
- UNREACHABLE();
- }
- }
- if (is_observed && !executed_set_prototype) {
- if (lookup.IsTransition()) {
- EnqueueChangeRecord(object, "add", name, old_value);
- } else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(object, "reconfigure", name, old_value);
- } else {
- LookupResult new_lookup(isolate);
- object->LookupOwn(name, &new_lookup, true);
- bool value_changed = false;
- if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value =
- Object::GetPropertyOrElement(object, name).ToHandleChecked();
- value_changed = !old_value->SameValue(*new_value);
+ it.ReconfigureDataProperty(value, attributes);
+ it.PrepareForDataProperty(value);
+ it.WriteDataValue(value);
+
+ if (is_observed) {
+ if (old_value->SameValue(*value)) {
+ old_value = it.isolate()->factory()->the_hole_value();
+ }
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
+ }
+
+ return value;
}
- if (new_lookup.GetAttributes() != old_attributes) {
- if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(object, "reconfigure", name, old_value);
- } else if (value_changed) {
- EnqueueChangeRecord(object, "update", name, old_value);
+
+ case LookupIterator::DATA: {
+ PropertyDetails details = it.property_details();
+ Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
+ // Regular property update if the attributes match.
+ if (details.attributes() == attributes) {
+ return SetDataProperty(&it, value);
+ }
+ // Reconfigure the data property if the attributes mismatch.
+ if (is_observed) old_value = it.GetDataValue();
+
+ it.ReconfigureDataProperty(value, attributes);
+ it.PrepareForDataProperty(value);
+ it.WriteDataValue(value);
+
+ if (is_observed) {
+ if (old_value->SameValue(*value)) {
+ old_value = it.isolate()->factory()->the_hole_value();
+ }
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
+ }
+
+ return value;
}
}
}
- return value;
+ return AddDataProperty(&it, value, attributes, STRICT,
+ CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -4303,7 +3987,7 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
if (object->IsJSObject() && name->AsArrayIndex(&index)) {
return GetOwnElementAttribute(object, index);
}
- LookupIterator it(object, name, LookupIterator::CHECK_OWN);
+ LookupIterator it(object, name, LookupIterator::HIDDEN);
return GetPropertyAttributes(&it);
}
@@ -4313,6 +3997,7 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
return JSProxy::GetPropertyAttributesWithHandler(
@@ -4328,11 +4013,9 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
case LookupIterator::ACCESS_CHECK:
if (it->HasAccess(v8::ACCESS_HAS)) break;
return JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
- case LookupIterator::PROPERTY:
- if (it->HasProperty()) {
- return maybe(it->property_details().attributes());
- }
- break;
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::DATA:
+ return maybe(it->property_details().attributes());
}
}
return maybe(ABSENT);
@@ -4550,11 +4233,7 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
- case INTERCEPTOR:
- break;
- case HANDLER:
case NORMAL:
- case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -4735,9 +4414,15 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
void JSObject::ResetElements(Handle<JSObject> object) {
- Heap* heap = object->GetIsolate()->heap();
- CHECK(object->map() != heap->sloppy_arguments_elements_map());
- object->set_elements(object->map()->GetInitialElements());
+ Isolate* isolate = object->GetIsolate();
+ CHECK(object->map() != isolate->heap()->sloppy_arguments_elements_map());
+ if (object->map()->has_dictionary_elements()) {
+ Handle<SeededNumberDictionary> new_elements =
+ SeededNumberDictionary::New(isolate, 0);
+ object->set_elements(*new_elements);
+ } else {
+ object->set_elements(object->map()->GetInitialElements());
+ }
}
@@ -5011,7 +4696,7 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
- LookupIterator it(object, hidden, LookupIterator::CHECK_OWN_REAL);
+ LookupIterator it(object, hidden, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
// Cannot get an exception since the hidden_string isn't accessible to JS.
DCHECK(maybe.has_value);
@@ -5045,14 +4730,10 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
}
} else {
Isolate* isolate = GetIsolate();
- LookupResult result(isolate);
- LookupOwnRealNamedProperty(isolate->factory()->hidden_string(), &result);
- if (result.IsFound()) {
- DCHECK(result.IsNormal());
- DCHECK(result.holder() == this);
- return GetNormalizedProperty(&result);
- }
- return GetHeap()->undefined_value();
+ LookupIterator it(handle(this), isolate->factory()->hidden_string(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ // Access check is always skipped for the hidden string anyways.
+ return *GetDataProperty(&it);
}
}
@@ -5077,10 +4758,7 @@ Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
inline_value);
}
- JSObject::SetOwnPropertyIgnoreAttributes(
- object, isolate->factory()->hidden_string(),
- hashtable, DONT_ENUM).Assert();
-
+ SetHiddenPropertiesHashTable(object, hashtable);
return hashtable;
}
@@ -5088,85 +4766,39 @@ Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
Handle<Object> value) {
DCHECK(!object->IsJSGlobalProxy());
-
Isolate* isolate = object->GetIsolate();
-
- // We can store the identity hash inline iff there is no backing store
- // for hidden properties yet.
- DCHECK(JSObject::HasHiddenProperties(object) != value->IsSmi());
- if (object->HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden string. Since the
- // hidden strings hash code is zero (and no other name has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = object->map()->instance_descriptors();
- if (descriptors->number_of_descriptors() > 0) {
- int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == isolate->heap()->hidden_string()
- && sorted_index < object->map()->NumberOfOwnDescriptors()) {
- object->WriteToField(sorted_index, *value);
- return object;
- }
- }
- }
-
- SetOwnPropertyIgnoreAttributes(object, isolate->factory()->hidden_string(),
- value, DONT_ENUM,
- OMIT_EXTENSIBILITY_CHECK).Assert();
+ Handle<Name> name = isolate->factory()->hidden_string();
+ SetOwnPropertyIgnoreAttributes(object, name, value, DONT_ENUM).Assert();
return object;
}
-Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode delete_mode) {
- // Check own property, ignore interceptor.
- Isolate* isolate = object->GetIsolate();
- LookupResult lookup(isolate);
- object->LookupOwnRealNamedProperty(name, &lookup);
- if (!lookup.IsFound()) return isolate->factory()->true_value();
-
- PropertyNormalizationMode mode = object->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
- // Normalize object if needed.
- NormalizeProperties(object, mode, 0);
-
- Handle<Object> result = DeleteNormalizedProperty(object, name, delete_mode);
- ReoptimizeIfPrototype(object);
- return result;
-}
-
-
MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
- Handle<JSObject> object, Handle<Name> name) {
- Isolate* isolate = object->GetIsolate();
+ Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name) {
+ Isolate* isolate = holder->GetIsolate();
// TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return isolate->factory()->false_value();
+ if (name->IsSymbol()) return MaybeHandle<Object>();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- if (!interceptor->deleter()->IsUndefined()) {
- v8::NamedPropertyDeleterCallback deleter =
- v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *object, *name));
- PropertyCallbackArguments args(
- isolate, interceptor->data(), *object, *object);
- v8::Handle<v8::Boolean> result =
- args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!result.IsEmpty()) {
- DCHECK(result->IsBoolean());
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- // Rebox CustomArguments::kReturnValueOffset before returning.
- return handle(*result_internal, isolate);
- }
- }
- Handle<Object> result =
- DeletePropertyPostInterceptor(object, name, NORMAL_DELETION);
- return result;
+ Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
+ if (interceptor->deleter()->IsUndefined()) return MaybeHandle<Object>();
+
+ v8::NamedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder);
+ v8::Handle<v8::Boolean> result =
+ args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.IsEmpty()) return MaybeHandle<Object>();
+
+ DCHECK(result->IsBoolean());
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
}
@@ -5222,11 +4854,9 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
// Deleting a non-configurable property in strict mode.
Handle<Object> name = factory->NewNumberFromUint(index);
Handle<Object> args[2] = { name, object };
- Handle<Object> error =
- factory->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- isolate->Throw(*error);
- return Handle<Object>();
+ THROW_NEW_ERROR(isolate, NewTypeError("strict_delete_property",
+ HandleVector(args, 2)),
+ Object);
}
return factory->false_value();
}
@@ -5282,87 +4912,90 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
Handle<Name> name,
DeleteMode delete_mode) {
- Isolate* isolate = object->GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
DCHECK(name->IsName());
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(object, name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->false_value();
- }
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return isolate->factory()->false_value();
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return JSGlobalObject::DeleteProperty(
- Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter)), name,
- delete_mode);
- }
-
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
return DeleteElement(object, index, delete_mode);
}
- LookupResult lookup(isolate);
- object->LookupOwn(name, &lookup, true);
- if (!lookup.IsFound()) return isolate->factory()->true_value();
- // Ignore attributes if forcing a deletion.
- if (lookup.IsDontDelete() && delete_mode != FORCE_DELETION) {
- if (delete_mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- Handle<Object> args[2] = { name, object };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
- }
- return isolate->factory()->false_value();
- }
+ // Skip interceptors on FORCE_DELETION.
+ LookupIterator::Configuration config =
+ delete_mode == FORCE_DELETION ? LookupIterator::HIDDEN_SKIP_INTERCEPTOR
+ : LookupIterator::HIDDEN;
+
+ LookupIterator it(object, name, config);
- Handle<Object> old_value = isolate->factory()->the_hole_value();
bool is_observed = object->map()->is_observed() &&
- *name != isolate->heap()->hidden_string();
- if (is_observed && lookup.IsDataProperty()) {
- old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
- }
- Handle<Object> result;
+ *name != it.isolate()->heap()->hidden_string();
+ Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
- // Check for interceptor.
- if (lookup.IsInterceptor()) {
- // Skip interceptor if forcing a deletion.
- if (delete_mode == FORCE_DELETION) {
- result = DeletePropertyPostInterceptor(object, name, delete_mode);
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- DeletePropertyWithInterceptor(object, name),
- Object);
- }
- } else {
- PropertyNormalizationMode mode = object->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
- // Normalize object if needed.
- NormalizeProperties(object, mode, 0);
- // Make sure the properties are normalized before removing the entry.
- result = DeleteNormalizedProperty(object, name, delete_mode);
- ReoptimizeIfPrototype(object);
- }
+ for (; it.IsFound(); it.Next()) {
+ switch (it.state()) {
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::ACCESS_CHECK:
+ if (it.HasAccess(v8::ACCESS_DELETE)) break;
+ it.isolate()->ReportFailedAccessCheck(it.GetHolder<JSObject>(),
+ v8::ACCESS_DELETE);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it.isolate(), Object);
+ return it.isolate()->factory()->false_value();
+ case LookupIterator::INTERCEPTOR: {
+ MaybeHandle<Object> maybe_result =
+ JSObject::DeletePropertyWithInterceptor(it.GetHolder<JSObject>(),
+ object, it.name());
+ // Delete with interceptor succeeded. Return result.
+ if (!maybe_result.is_null()) return maybe_result;
+ // An exception was thrown in the interceptor. Propagate.
+ if (it.isolate()->has_pending_exception()) return maybe_result;
+ break;
+ }
+ case LookupIterator::DATA:
+ if (is_observed) {
+ old_value = it.GetDataValue();
+ }
+ // Fall through.
+ case LookupIterator::ACCESSOR: {
+ if (delete_mode != FORCE_DELETION && !it.IsConfigurable()) {
+ // Fail if the property is not configurable.
+ if (delete_mode == STRICT_DELETION) {
+ Handle<Object> args[2] = {name, object};
+ THROW_NEW_ERROR(it.isolate(),
+ NewTypeError("strict_delete_property",
+ HandleVector(args, arraysize(args))),
+ Object);
+ }
+ return it.isolate()->factory()->false_value();
+ }
- if (is_observed) {
- Maybe<bool> maybe = HasOwnProperty(object, name);
- if (!maybe.has_value) return MaybeHandle<Object>();
- if (!maybe.value) {
- EnqueueChangeRecord(object, "delete", name, old_value);
+ PropertyNormalizationMode mode = object->map()->is_prototype_map()
+ ? KEEP_INOBJECT_PROPERTIES
+ : CLEAR_INOBJECT_PROPERTIES;
+ Handle<JSObject> holder = it.GetHolder<JSObject>();
+ // TODO(verwaest): Remove this temporary compatibility hack when blink
+ // tests are updated.
+ if (!holder.is_identical_to(object) &&
+ !(object->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
+ return it.isolate()->factory()->true_value();
+ }
+ NormalizeProperties(holder, mode, 0);
+ Handle<Object> result =
+ DeleteNormalizedProperty(holder, name, delete_mode);
+ ReoptimizeIfPrototype(holder);
+
+ if (is_observed) {
+ EnqueueChangeRecord(object, "delete", name, old_value);
+ }
+
+ return result;
+ }
}
}
- return result;
+ return it.isolate()->factory()->true_value();
}
@@ -5547,11 +5180,10 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
// It's not possible to seal objects with external array elements
if (object->HasExternalArrayElements() ||
object->HasFixedTypedArrayElements()) {
- Handle<Object> error =
- isolate->factory()->NewTypeError(
- "cant_prevent_ext_external_array_elements",
- HandleVector(&object, 1));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("cant_prevent_ext_external_array_elements",
+ HandleVector(&object, 1)),
+ Object);
}
// If there are fast elements we normalize.
@@ -5630,11 +5262,10 @@ MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
// It's not possible to freeze objects with external array elements
if (object->HasExternalArrayElements() ||
object->HasFixedTypedArrayElements()) {
- Handle<Object> error =
- isolate->factory()->NewTypeError(
- "cant_prevent_ext_external_array_elements",
- HandleVector(&object, 1));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("cant_prevent_ext_external_array_elements",
+ HandleVector(&object, 1)),
+ Object);
}
Handle<SeededNumberDictionary> new_element_dictionary;
@@ -5971,41 +5602,6 @@ MaybeHandle<JSObject> JSObject::DeepCopy(
}
-Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object,
- Handle<Name> key) {
- Isolate* isolate = object->GetIsolate();
- LookupResult lookup(isolate);
- {
- DisallowHeapAllocation no_allocation;
- object->LookupRealNamedProperty(key, &lookup);
- }
- Handle<Object> result = isolate->factory()->undefined_value();
- if (lookup.IsFound() && !lookup.IsTransition()) {
- switch (lookup.type()) {
- case NORMAL:
- result = GetNormalizedProperty(
- Handle<JSObject>(lookup.holder(), isolate), &lookup);
- break;
- case FIELD:
- result = FastPropertyAt(Handle<JSObject>(lookup.holder(), isolate),
- lookup.representation(),
- lookup.GetFieldIndex());
- break;
- case CONSTANT:
- result = Handle<Object>(lookup.GetConstant(), isolate);
- break;
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- break;
- case NONEXISTENT:
- UNREACHABLE();
- }
- }
- return result;
-}
-
-
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that
// it is no proxy, has no interceptors and needs no access checks).
@@ -6078,68 +5674,6 @@ int Map::NextFreePropertyIndex() {
}
-void JSReceiver::LookupOwn(
- Handle<Name> name, LookupResult* result, bool search_hidden_prototypes) {
- DisallowHeapAllocation no_gc;
- DCHECK(name->IsName());
-
- if (IsJSGlobalProxy()) {
- PrototypeIterator iter(GetIsolate(), this);
- if (iter.IsAtEnd()) return result->NotFound();
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return JSReceiver::cast(iter.GetCurrent())
- ->LookupOwn(name, result, search_hidden_prototypes);
- }
-
- if (IsJSProxy()) {
- result->HandlerResult(JSProxy::cast(this));
- return;
- }
-
- // Do not use inline caching if the object is a non-global object
- // that requires access checks.
- if (IsAccessCheckNeeded()) {
- result->DisallowCaching();
- }
-
- JSObject* js_object = JSObject::cast(this);
-
- // Check for lookup interceptor except when bootstrapping.
- if (js_object->HasNamedInterceptor() &&
- !GetIsolate()->bootstrapper()->IsActive()) {
- result->InterceptorResult(js_object);
- return;
- }
-
- js_object->LookupOwnRealNamedProperty(name, result);
- if (result->IsFound() || name->IsOwn() || !search_hidden_prototypes) return;
-
- PrototypeIterator iter(GetIsolate(), js_object);
- if (!iter.GetCurrent()->IsJSReceiver()) return;
- JSReceiver* receiver = JSReceiver::cast(iter.GetCurrent());
- if (receiver->map()->is_hidden_prototype()) {
- receiver->LookupOwn(name, result, search_hidden_prototypes);
- }
-}
-
-
-void JSReceiver::Lookup(Handle<Name> name, LookupResult* result) {
- DisallowHeapAllocation no_gc;
- // Ecma-262 3rd 8.6.2.4
- for (PrototypeIterator iter(GetIsolate(), this,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- JSReceiver::cast(iter.GetCurrent())->LookupOwn(name, result, false);
- if (result->IsFound()) return;
- if (name->IsOwn()) {
- result->NotFound();
- return;
- }
- }
- result->NotFound();
-}
-
-
static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
int len = array->length();
for (int i = 0; i < len; i++) {
@@ -6282,7 +5816,7 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
Execution::Call(isolate,
isolate->proxy_enumerate(),
object,
- ARRAY_SIZE(args),
+ arraysize(args),
args),
FixedArray);
ASSIGN_RETURN_ON_EXCEPTION(
@@ -6386,7 +5920,7 @@ static bool UpdateGetterSetterInDictionary(
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
- DCHECK(!details.IsDontDelete());
+ DCHECK(details.IsConfigurable());
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(
entry,
@@ -6466,51 +6000,6 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
}
-Handle<AccessorPair> JSObject::CreateAccessorPairFor(Handle<JSObject> object,
- Handle<Name> name) {
- Isolate* isolate = object->GetIsolate();
- LookupResult result(isolate);
- object->LookupOwnRealNamedProperty(name, &result);
- if (result.IsPropertyCallbacks()) {
- // Note that the result can actually have IsDontDelete() == true when we
- // e.g. have to fall back to the slow case while adding a setter after
- // successfully reusing a map transition for a getter. Nevertheless, this is
- // OK, because the assertion only holds for the whole addition of both
- // accessors, not for the addition of each part. See first comment in
- // DefinePropertyAccessor below.
- Object* obj = result.GetCallbackObject();
- if (obj->IsAccessorPair()) {
- return AccessorPair::Copy(handle(AccessorPair::cast(obj), isolate));
- }
- }
- return isolate->factory()->NewAccessorPair();
-}
-
-
-void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- // We could assert that the property is configurable here, but we would need
- // to do a lookup, which seems to be a bit of overkill.
- bool only_attribute_changes = getter->IsNull() && setter->IsNull();
- if (object->HasFastProperties() && !only_attribute_changes &&
- (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors)) {
- bool getterOk = getter->IsNull() ||
- DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
- bool setterOk = !getterOk || setter->IsNull() ||
- DefineFastAccessor(object, name, ACCESSOR_SETTER, setter, attributes);
- if (getterOk && setterOk) return;
- }
-
- Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name);
- accessors->SetComponents(*getter, *setter);
-
- SetPropertyCallback(object, name, accessors, attributes);
-}
-
-
bool Map::DictionaryElementsInPrototypeChainOnly() {
if (IsDictionaryElementsKind(elements_kind())) {
return false;
@@ -6655,12 +6144,12 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
Object::GetElement(isolate, object, index).ToHandleChecked();
}
} else {
- LookupResult lookup(isolate);
- object->LookupOwn(name, &lookup, true);
- preexists = lookup.IsProperty();
- if (preexists && lookup.IsDataProperty()) {
- old_value =
- Object::GetPropertyOrElement(object, name).ToHandleChecked();
+ LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ CHECK(GetPropertyAttributes(&it).has_value);
+ preexists = it.IsFound();
+ if (preexists && (it.state() == LookupIterator::DATA ||
+ it.GetAccessors()->IsAccessorInfo())) {
+ old_value = GetProperty(&it).ToHandleChecked();
}
}
}
@@ -6668,7 +6157,23 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
if (is_element) {
DefineElementAccessor(object, index, getter, setter, attributes);
} else {
- DefinePropertyAccessor(object, name, getter, setter, attributes);
+ DCHECK(getter->IsSpecFunction() || getter->IsUndefined() ||
+ getter->IsNull());
+ DCHECK(setter->IsSpecFunction() || setter->IsUndefined() ||
+ setter->IsNull());
+ // At least one of the accessors needs to be a new value.
+ DCHECK(!getter->IsNull() || !setter->IsNull());
+ LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ // We already did an access check before. We do have access.
+ it.Next();
+ }
+ if (!getter->IsNull()) {
+ it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
+ }
+ if (!setter->IsNull()) {
+ it.TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
+ }
}
if (is_observed) {
@@ -6680,111 +6185,6 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
}
-static bool TryAccessorTransition(Handle<JSObject> self,
- Handle<Map> transitioned_map,
- int target_descriptor,
- AccessorComponent component,
- Handle<Object> accessor,
- PropertyAttributes attributes) {
- DescriptorArray* descs = transitioned_map->instance_descriptors();
- PropertyDetails details = descs->GetDetails(target_descriptor);
-
- // If the transition target was not callbacks, fall back to the slow case.
- if (details.type() != CALLBACKS) return false;
- Object* descriptor = descs->GetCallbacksObject(target_descriptor);
- if (!descriptor->IsAccessorPair()) return false;
-
- Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
- PropertyAttributes target_attributes = details.attributes();
-
- // Reuse transition if adding same accessor with same attributes.
- if (target_accessor == *accessor && target_attributes == attributes) {
- JSObject::MigrateToMap(self, transitioned_map);
- return true;
- }
-
- // If either not the same accessor, or not the same attributes, fall back to
- // the slow case.
- return false;
-}
-
-
-bool JSObject::DefineFastAccessor(Handle<JSObject> object,
- Handle<Name> name,
- AccessorComponent component,
- Handle<Object> accessor,
- PropertyAttributes attributes) {
- DCHECK(accessor->IsSpecFunction() || accessor->IsUndefined());
- Isolate* isolate = object->GetIsolate();
- LookupResult result(isolate);
- object->LookupOwn(name, &result);
-
- if (result.IsFound() && !result.IsPropertyCallbacks()) {
- return false;
- }
-
- // Return success if the same accessor with the same attributes already exist.
- AccessorPair* source_accessors = NULL;
- if (result.IsPropertyCallbacks()) {
- Object* callback_value = result.GetCallbackObject();
- if (callback_value->IsAccessorPair()) {
- source_accessors = AccessorPair::cast(callback_value);
- Object* entry = source_accessors->get(component);
- if (entry == *accessor && result.GetAttributes() == attributes) {
- return true;
- }
- } else {
- return false;
- }
-
- int descriptor_number = result.GetDescriptorIndex();
-
- object->map()->LookupTransition(*object, *name, &result);
-
- if (result.IsFound()) {
- Handle<Map> target(result.GetTransitionTarget());
- DCHECK(target->NumberOfOwnDescriptors() ==
- object->map()->NumberOfOwnDescriptors());
- // This works since descriptors are sorted in order of addition.
- DCHECK(Name::Equals(
- handle(object->map()->instance_descriptors()->GetKey(
- descriptor_number)),
- name));
- return TryAccessorTransition(object, target, descriptor_number,
- component, accessor, attributes);
- }
- } else {
- // If not, lookup a transition.
- object->map()->LookupTransition(*object, *name, &result);
-
- // If there is a transition, try to follow it.
- if (result.IsFound()) {
- Handle<Map> target(result.GetTransitionTarget());
- int descriptor_number = target->LastAdded();
- DCHECK(Name::Equals(name,
- handle(target->instance_descriptors()->GetKey(descriptor_number))));
- return TryAccessorTransition(object, target, descriptor_number,
- component, accessor, attributes);
- }
- }
-
- // If there is no transition yet, add a transition to the a new accessor pair
- // containing the accessor. Allocate a new pair if there were no source
- // accessors. Otherwise, copy the pair and modify the accessor.
- Handle<AccessorPair> accessors = source_accessors != NULL
- ? AccessorPair::Copy(Handle<AccessorPair>(source_accessors))
- : isolate->factory()->NewAccessorPair();
- accessors->set(component, *accessor);
-
- CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
- Handle<Map> new_map = Map::CopyInsertDescriptor(
- handle(object->map()), &new_accessors_desc, INSERT_TRANSITION);
-
- JSObject::MigrateToMap(object, new_map);
- return true;
-}
-
-
MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Handle<AccessorInfo> info) {
Isolate* isolate = object->GetIsolate();
@@ -6850,11 +6250,11 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
SetElementCallback(object, index, info, info->property_attributes());
} else {
// Lookup the name.
- LookupResult result(isolate);
- object->LookupOwn(name, &result, true);
+ LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ CHECK(GetPropertyAttributes(&it).has_value);
// ES5 forbids turning a property into an accessor if it's not
- // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
- if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
+ // configurable. See 8.6.1 (Table 5).
+ if (it.IsFound() && (it.IsReadOnly() || !it.IsConfigurable())) {
return factory->undefined_value();
}
@@ -6874,25 +6274,26 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
// interceptor calls.
AssertNoContextChange ncc(isolate);
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(object, name, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
- }
-
// Make the lookup and include prototypes.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSObject() &&
- JSObject::cast(*PrototypeIterator::GetCurrent(iter))
- ->HasDictionaryElements()) {
- JSObject* js_object =
- JSObject::cast(*PrototypeIterator::GetCurrent(iter));
+ Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+ // Check access rights if needed.
+ if (current->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(Handle<JSObject>::cast(current), name,
+ v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(current),
+ v8::ACCESS_HAS);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
+ }
+
+ if (current->IsJSObject() &&
+ Handle<JSObject>::cast(current)->HasDictionaryElements()) {
+ JSObject* js_object = JSObject::cast(*current);
SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
@@ -6906,19 +6307,33 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
}
}
} else {
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- LookupResult result(isolate);
- JSReceiver::cast(*PrototypeIterator::GetCurrent(iter))
- ->LookupOwn(name, &result);
- if (result.IsFound()) {
- if (result.IsReadOnly()) return isolate->factory()->undefined_value();
- if (result.IsPropertyCallbacks()) {
- Object* obj = result.GetCallbackObject();
- if (obj->IsAccessorPair()) {
- return handle(AccessorPair::cast(obj)->GetComponent(component),
- isolate);
+ LookupIterator it(object, name,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ for (; it.IsFound(); it.Next()) {
+ switch (it.state()) {
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESS_CHECK:
+ if (it.HasAccess(v8::ACCESS_HAS)) continue;
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>(),
+ v8::ACCESS_HAS);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
+
+ case LookupIterator::JSPROXY:
+ return isolate->factory()->undefined_value();
+
+ case LookupIterator::DATA:
+ continue;
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> maybe_pair = it.GetAccessors();
+ if (maybe_pair->IsAccessorPair()) {
+ return handle(
+ AccessorPair::cast(*maybe_pair)->GetComponent(component),
+ isolate);
}
}
}
@@ -7256,30 +6671,26 @@ Handle<Map> Map::Copy(Handle<Map> map) {
}
-Handle<Map> Map::Create(Handle<JSFunction> constructor,
- int extra_inobject_properties) {
- Handle<Map> copy = Copy(handle(constructor->initial_map()));
+Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
+ Handle<Map> copy = Copy(handle(isolate->object_function()->initial_map()));
- // Check that we do not overflow the instance size when adding the
- // extra inobject properties.
- int instance_size_delta = extra_inobject_properties * kPointerSize;
- int max_instance_size_delta =
- JSObject::kMaxInstanceSize - copy->instance_size();
- int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2;
+ // Check that we do not overflow the instance size when adding the extra
+ // inobject properties. If the instance size overflows, we allocate as many
+ // properties as we can as inobject properties.
+ int max_extra_properties =
+ (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
- // If the instance size overflows, we allocate as many properties as we can as
- // inobject properties.
- if (extra_inobject_properties > max_extra_properties) {
- instance_size_delta = max_instance_size_delta;
- extra_inobject_properties = max_extra_properties;
+ if (inobject_properties > max_extra_properties) {
+ inobject_properties = max_extra_properties;
}
+ int new_instance_size =
+ JSObject::kHeaderSize + kPointerSize * inobject_properties;
+
// Adjust the map with the extra inobject properties.
- int inobject_properties =
- copy->inobject_properties() + extra_inobject_properties;
copy->set_inobject_properties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
- copy->set_instance_size(copy->instance_size() + instance_size_delta);
+ copy->set_instance_size(new_instance_size);
copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
return copy;
}
@@ -7315,9 +6726,7 @@ bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
return false;
case NORMAL:
- case INTERCEPTOR:
- case HANDLER:
- case NONEXISTENT:
+ UNREACHABLE();
break;
}
@@ -7332,7 +6741,7 @@ Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
if (map->is_dictionary_map()) return map;
// Migrate to the newest map before storing the property.
- if (map->is_deprecated()) map = Update(map);
+ map = Update(map);
Handle<DescriptorArray> descriptors(map->instance_descriptors());
@@ -7354,8 +6763,8 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
// Dictionary maps can always have additional data properties.
if (map->is_dictionary_map()) return map;
- // Migrate to the newest map before transitioning to the new property.
- if (map->is_deprecated()) map = Update(map);
+ // Migrate to the newest map before storing the property.
+ map = Update(map);
int index = map->SearchTransition(*name);
if (index != TransitionArray::kNotFound) {
@@ -7365,9 +6774,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
// TODO(verwaest): Handle attributes better.
DescriptorArray* descriptors = transition->instance_descriptors();
if (descriptors->GetDetails(descriptor).attributes() != attributes) {
- return CopyGeneralizeAllRepresentations(transition, descriptor,
- FORCE_FIELD, attributes,
- "attributes mismatch");
+ return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
}
return Map::PrepareForDataProperty(transition, descriptor, value);
@@ -7394,6 +6801,113 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
}
+Handle<Map> Map::ReconfigureDataProperty(Handle<Map> map, int descriptor,
+ PropertyAttributes attributes) {
+ // Dictionaries have to be reconfigured in-place.
+ DCHECK(!map->is_dictionary_map());
+
+ // For now, give up on transitioning and just create a unique map.
+ // TODO(verwaest/ishell): Cache transitions with different attributes.
+ return CopyGeneralizeAllRepresentations(map, descriptor, FORCE_FIELD,
+ attributes, "attributes mismatch");
+}
+
+
+Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
+ Handle<Name> name,
+ AccessorComponent component,
+ Handle<Object> accessor,
+ PropertyAttributes attributes) {
+ Isolate* isolate = name->GetIsolate();
+
+ // Dictionary maps can always have additional data properties.
+ if (map->is_dictionary_map()) {
+ // For global objects, property cells are inlined. We need to change the
+ // map.
+ if (map->IsGlobalObjectMap()) return Copy(map);
+ return map;
+ }
+
+ // Migrate to the newest map before transitioning to the new property.
+ map = Update(map);
+
+ PropertyNormalizationMode mode = map->is_prototype_map()
+ ? KEEP_INOBJECT_PROPERTIES
+ : CLEAR_INOBJECT_PROPERTIES;
+
+ int index = map->SearchTransition(*name);
+ if (index != TransitionArray::kNotFound) {
+ Handle<Map> transition(map->GetTransition(index));
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ // Fast path, assume that we're modifying the last added descriptor.
+ int descriptor = transition->LastAdded();
+ if (descriptors->GetKey(descriptor) != *name) {
+ // If not, search for the descriptor.
+ descriptor = descriptors->SearchWithCache(*name, *transition);
+ }
+
+ if (descriptors->GetDetails(descriptor).type() != CALLBACKS) {
+ return Map::Normalize(map, mode);
+ }
+
+ // TODO(verwaest): Handle attributes better.
+ if (descriptors->GetDetails(descriptor).attributes() != attributes) {
+ return Map::Normalize(map, mode);
+ }
+
+ Handle<Object> maybe_pair(descriptors->GetValue(descriptor), isolate);
+ if (!maybe_pair->IsAccessorPair()) {
+ return Map::Normalize(map, mode);
+ }
+
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
+ if (pair->get(component) != *accessor) {
+ return Map::Normalize(map, mode);
+ }
+
+ return transition;
+ }
+
+ Handle<AccessorPair> pair;
+ DescriptorArray* old_descriptors = map->instance_descriptors();
+ int descriptor = old_descriptors->SearchWithCache(*name, *map);
+ if (descriptor != DescriptorArray::kNotFound) {
+ PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
+ if (old_details.type() != CALLBACKS) {
+ return Map::Normalize(map, mode);
+ }
+
+ if (old_details.attributes() != attributes) {
+ return Map::Normalize(map, mode);
+ }
+
+ Handle<Object> maybe_pair(old_descriptors->GetValue(descriptor), isolate);
+ if (!maybe_pair->IsAccessorPair()) {
+ return Map::Normalize(map, mode);
+ }
+
+ Object* current = Handle<AccessorPair>::cast(maybe_pair)->get(component);
+ if (current == *accessor) return map;
+
+ if (!current->IsTheHole()) {
+ return Map::Normalize(map, mode);
+ }
+
+ pair = AccessorPair::Copy(Handle<AccessorPair>::cast(maybe_pair));
+ } else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
+ map->TooManyFastProperties(CERTAINLY_NOT_STORE_FROM_KEYED)) {
+ return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+ } else {
+ pair = isolate->factory()->NewAccessorPair();
+ }
+
+ pair->set(component, *accessor);
+ TransitionFlag flag = INSERT_TRANSITION;
+ CallbacksDescriptor new_desc(name, pair, attributes);
+ return Map::CopyInsertDescriptor(map, &new_desc, flag);
+}
+
+
Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
@@ -7939,7 +7453,7 @@ class CodeCacheHashTableKey : public HashTableKey {
CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
: name_(name), flags_(code->flags()), code_(code) { }
- bool IsMatch(Object* other) V8_OVERRIDE {
+ bool IsMatch(Object* other) OVERRIDE {
if (!other->IsFixedArray()) return false;
FixedArray* pair = FixedArray::cast(other);
Name* name = Name::cast(pair->get(0));
@@ -7954,16 +7468,16 @@ class CodeCacheHashTableKey : public HashTableKey {
return name->Hash() ^ flags;
}
- uint32_t Hash() V8_OVERRIDE { return NameFlagsHashHelper(*name_, flags_); }
+ uint32_t Hash() OVERRIDE { return NameFlagsHashHelper(*name_, flags_); }
- uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+ uint32_t HashForObject(Object* obj) OVERRIDE {
FixedArray* pair = FixedArray::cast(obj);
Name* name = Name::cast(pair->get(0));
Code* code = Code::cast(pair->get(1));
return NameFlagsHashHelper(name, code->flags());
}
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
Handle<Code> code = code_.ToHandleChecked();
Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2);
pair->set(0, *name_);
@@ -8067,7 +7581,7 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
: maps_(maps),
code_flags_(code_flags) {}
- bool IsMatch(Object* other) V8_OVERRIDE {
+ bool IsMatch(Object* other) OVERRIDE {
MapHandleList other_maps(kDefaultListAllocationSize);
int other_flags;
FromObject(other, &other_flags, &other_maps);
@@ -8102,18 +7616,18 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
return hash;
}
- uint32_t Hash() V8_OVERRIDE {
+ uint32_t Hash() OVERRIDE {
return MapsHashHelper(maps_, code_flags_);
}
- uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+ uint32_t HashForObject(Object* obj) OVERRIDE {
MapHandleList other_maps(kDefaultListAllocationSize);
int other_flags;
FromObject(obj, &other_flags, &other_maps);
return MapsHashHelper(&other_maps, other_flags);
}
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
// The maps in |maps_| must be copied to a newly allocated FixedArray,
// both because the referenced MapList is short-lived, and because C++
// objects can't be stored in the heap anyway.
@@ -8402,17 +7916,11 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
Handle<DeoptimizationInputData> DeoptimizationInputData::New(
- Isolate* isolate, int deopt_entry_count, int return_patch_address_count,
- PretenureFlag pretenure) {
- DCHECK(deopt_entry_count + return_patch_address_count > 0);
- Handle<FixedArray> deoptimization_data =
- Handle<FixedArray>::cast(isolate->factory()->NewFixedArray(
- LengthFor(deopt_entry_count, return_patch_address_count), pretenure));
- deoptimization_data->set(kDeoptEntryCountIndex,
- Smi::FromInt(deopt_entry_count));
- deoptimization_data->set(kReturnAddressPatchEntryCountIndex,
- Smi::FromInt(return_patch_address_count));
- return Handle<DeoptimizationInputData>::cast(deoptimization_data);
+ Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure) {
+ DCHECK(deopt_entry_count > 0);
+ return Handle<DeoptimizationInputData>::cast(
+ isolate->factory()->NewFixedArray(LengthFor(deopt_entry_count),
+ pretenure));
}
@@ -8477,7 +7985,7 @@ String::FlatContent String::GetFlatContent() {
if (shape.representation_tag() == kSeqStringTag) {
start = SeqOneByteString::cast(string)->GetChars();
} else {
- start = ExternalAsciiString::cast(string)->GetChars();
+ start = ExternalOneByteString::cast(string)->GetChars();
}
return FlatContent(start + offset, length);
} else {
@@ -8662,9 +8170,9 @@ FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
: Relocatable(isolate),
str_(0),
- is_ascii_(true),
+ is_one_byte_(true),
length_(input.length()),
- start_(input.start()) { }
+ start_(input.start()) {}
void FlatStringReader::PostGarbageCollection() {
@@ -8675,8 +8183,8 @@ void FlatStringReader::PostGarbageCollection() {
// This does not actually prevent the vector from being relocated later.
String::FlatContent content = str->GetFlatContent();
DCHECK(content.IsFlat());
- is_ascii_ = content.IsAscii();
- if (is_ascii_) {
+ is_one_byte_ = content.IsOneByte();
+ if (is_one_byte_) {
start_ = content.ToOneByteVector().start();
} else {
start_ = content.ToUC16Vector().start();
@@ -8870,8 +8378,7 @@ void String::WriteToFlat(String* src,
DCHECK(0 <= from && from <= to && to <= source->length());
switch (StringShape(source).full_representation_tag()) {
case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink,
- ExternalAsciiString::cast(source)->GetChars() + from,
+ CopyChars(sink, ExternalOneByteString::cast(source)->GetChars() + from,
to - from);
return;
}
@@ -8917,7 +8424,7 @@ void String::WriteToFlat(String* src,
String* second = cons_string->second();
// When repeatedly appending to a string, we get a cons string that
// is unbalanced to the left, a list, essentially. We inline the
- // common case of sequential ascii right child.
+ // common case of sequential one-byte right child.
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
} else if (second->IsSeqOneByteString()) {
@@ -8955,7 +8462,7 @@ static void CalculateLineEndsImpl(Isolate* isolate,
Vector<const SourceChar> src,
bool include_ending_line) {
const int src_len = src.length();
- StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
+ StringSearch<uint8_t, SourceChar> search(isolate, STATIC_CHAR_VECTOR("\n"));
// Find and record line ends.
int position = 0;
@@ -8985,7 +8492,7 @@ Handle<FixedArray> String::CalculateLineEnds(Handle<String> src,
// Dispatch on type of strings.
String::FlatContent content = src->GetFlatContent();
DCHECK(content.IsFlat());
- if (content.IsAscii()) {
+ if (content.IsOneByte()) {
CalculateLineEndsImpl(isolate,
&line_ends,
content.ToOneByteVector(),
@@ -9012,36 +8519,7 @@ template <typename Char>
static inline bool CompareRawStringContents(const Char* const a,
const Char* const b,
int length) {
- int i = 0;
-#ifndef V8_HOST_CAN_READ_UNALIGNED
- // If this architecture isn't comfortable reading unaligned ints
- // then we have to check that the strings are aligned before
- // comparing them blockwise.
- const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
- uintptr_t pa_addr = reinterpret_cast<uintptr_t>(a);
- uintptr_t pb_addr = reinterpret_cast<uintptr_t>(b);
- if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
-#endif
- const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
- int endpoint = length - kStepSize;
- // Compare blocks until we reach near the end of the string.
- for (; i <= endpoint; i += kStepSize) {
- uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
- uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
- if (wa != wb) {
- return false;
- }
- }
-#ifndef V8_HOST_CAN_READ_UNALIGNED
- }
-#endif
- // Compare the remaining characters that didn't fit into a block.
- for (; i < length; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
+ return CompareChars(a, b, length) == 0;
}
@@ -9272,7 +8750,7 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
String::FlatContent flat1 = one->GetFlatContent();
String::FlatContent flat2 = two->GetFlatContent();
- if (flat1.IsAscii() && flat2.IsAscii()) {
+ if (flat1.IsOneByte() && flat2.IsOneByte()) {
return CompareRawStringContents(flat1.ToOneByteVector().start(),
flat2.ToOneByteVector().start(),
one_length);
@@ -9293,8 +8771,8 @@ bool String::MarkAsUndetectable() {
if (map == heap->string_map()) {
this->set_map(heap->undetectable_string_map());
return true;
- } else if (map == heap->ascii_string_map()) {
- this->set_map(heap->undetectable_ascii_string_map());
+ } else if (map == heap->one_byte_string_map()) {
+ this->set_map(heap->undetectable_one_byte_string_map());
return true;
}
// Rest cannot be marked as undetectable
@@ -9337,7 +8815,7 @@ bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
if (str.length() != slen) return false;
DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
- if (content.IsAscii()) {
+ if (content.IsOneByte()) {
return CompareChars(content.ToOneByteVector().start(),
str.start(), slen) == 0;
}
@@ -9532,19 +9010,25 @@ void String::PrintOn(FILE* file) {
}
+inline static uint32_t ObjectAddressForHashing(Object* object) {
+ uint32_t value = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object));
+ return value & MemoryChunk::kAlignmentMask;
+}
+
+
int Map::Hash() {
// For performance reasons we only hash the 3 most variable fields of a map:
- // constructor, prototype and bit_field2.
+ // constructor, prototype and bit_field2. For predictability reasons we
+ // use objects' offsets in respective pages for hashing instead of raw
+ // addresses.
// Shift away the tag.
- int hash = (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(constructor())) >> 2);
+ int hash = ObjectAddressForHashing(constructor()) >> 2;
// XOR-ing the prototype and constructor directly yields too many zero bits
// when the two pointers are close (which is fairly common).
- // To avoid this we shift the prototype 4 bits relatively to the constructor.
- hash ^= (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(prototype())) << 2);
+ // To avoid this we shift the prototype bits relatively to the constructor.
+ hash ^= ObjectAddressForHashing(prototype()) << (32 - kPageSizeBits);
return hash ^ (hash >> 16) ^ bit_field2();
}
@@ -9628,7 +9112,6 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
void JSFunction::MarkForOptimization() {
- DCHECK(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
code()->optimizable());
@@ -9647,7 +9130,7 @@ void JSFunction::MarkForConcurrentOptimization() {
DCHECK(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
- PrintName();
+ ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
@@ -9665,7 +9148,7 @@ void JSFunction::MarkInOptimizationQueue() {
DCHECK(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
- PrintName();
+ ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
@@ -9674,6 +9157,30 @@ void JSFunction::MarkInOptimizationQueue() {
}
+Handle<JSFunction> JSFunction::CloneClosure(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<Map> map(function->map());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<Context> context(function->context());
+ Handle<JSFunction> clone =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
+
+ if (shared->bound()) {
+ clone->set_function_bindings(function->function_bindings());
+ }
+
+ // In typical case, __proto__ of ``function`` is the default Function
+ // prototype, which means that SetPrototype below is a no-op.
+ // In rare cases when that is not true, we mutate the clone's __proto__.
+ Handle<Object> original_prototype(map->prototype(), isolate);
+ if (*original_prototype != clone->map()->prototype()) {
+ JSObject::SetPrototype(clone, original_prototype, false).Assert();
+ }
+
+ return clone;
+}
+
+
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
@@ -10191,7 +9698,7 @@ Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
Handle<String> name_or_source_url_key =
isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("nameOrSourceURL"));
+ STATIC_CHAR_VECTOR("nameOrSourceURL"));
Handle<JSObject> script_wrapper = Script::GetWrapper(script);
Handle<Object> property = Object::GetProperty(
script_wrapper, name_or_source_url_key).ToHandleChecked();
@@ -10736,26 +10243,7 @@ int Code::SourceStatementPosition(Address pc) {
SafepointEntry Code::GetSafepointEntry(Address pc) {
SafepointTable table(this);
- SafepointEntry entry = table.FindEntry(pc);
- if (entry.is_valid() || !is_turbofanned()) {
- return entry;
- }
-
- // If the code is turbofanned, we might be looking for
- // an address that was patched by lazy deoptimization.
- // In that case look through the patch table, try to
- // lookup the original address there, and then use this
- // to find the safepoint entry.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(deoptimization_data());
- intptr_t offset = pc - instruction_start();
- for (int i = 0; i < deopt_data->ReturnAddressPatchCount(); i++) {
- if (deopt_data->PatchedAddressPc(i)->value() == offset) {
- int original_offset = deopt_data->ReturnAddressPc(i)->value();
- return table.FindEntry(instruction_start() + original_offset);
- }
- }
- return SafepointEntry();
+ return table.FindEntry(pc);
}
@@ -10914,7 +10402,7 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
void SharedFunctionInfo::ClearTypeFeedbackInfo() {
- FixedArray* vector = feedback_vector();
+ TypeFeedbackVector* vector = feedback_vector();
Heap* heap = GetHeap();
int length = vector->length();
@@ -10930,7 +10418,7 @@ void SharedFunctionInfo::ClearTypeFeedbackInfo() {
break;
// Fall through...
default:
- vector->set(i, TypeFeedbackInfo::RawUninitializedSentinel(heap),
+ vector->set(i, TypeFeedbackVector::RawUninitializedSentinel(heap),
SKIP_WRITE_BARRIER);
}
}
@@ -11308,19 +10796,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
os << "\n";
}
}
-
- int return_address_patch_count = ReturnAddressPatchCount();
- if (return_address_patch_count != 0) {
- os << "Return address patch data (count = " << return_address_patch_count
- << ")\n";
- os << " index pc patched_pc\n";
- }
- for (int i = 0; i < return_address_patch_count; i++) {
- Vector<char> buf = Vector<char>::New(128);
- SNPrintF(buf, "%6d %6d %12d\n", i, ReturnAddressPc(i)->value(),
- PatchedAddressPc(i)->value());
- os << buf.start();
- }
}
@@ -11398,14 +10873,11 @@ void Code::Disassemble(const char* name, OStream& os) { // NOLINT
}
if (is_compare_ic_stub()) {
DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
- CompareIC::State left_state, right_state, handler_state;
- Token::Value op;
- ICCompareStub::DecodeKey(stub_key(), &left_state, &right_state,
- &handler_state, &op);
- os << "compare_state = " << CompareIC::GetStateName(left_state) << "*"
- << CompareIC::GetStateName(right_state) << " -> "
- << CompareIC::GetStateName(handler_state) << "\n";
- os << "compare_operation = " << Token::Name(op) << "\n";
+ CompareICStub stub(stub_key(), GetIsolate());
+ os << "compare_state = " << CompareICState::GetStateName(stub.left())
+ << "*" << CompareICState::GetStateName(stub.right()) << " -> "
+ << CompareICState::GetStateName(stub.state()) << "\n";
+ os << "compare_operation = " << Token::Name(stub.op()) << "\n";
}
}
if ((name != NULL) && (name[0] != '\0')) {
@@ -11416,10 +10888,19 @@ void Code::Disassemble(const char* name, OStream& os) { // NOLINT
}
os << "Instructions (size = " << instruction_size() << ")\n";
- // TODO(svenpanne) The Disassembler should use streams, too!
{
- CodeTracer::Scope trace_scope(GetIsolate()->GetCodeTracer());
- Disassembler::Decode(trace_scope.file(), this);
+ Isolate* isolate = GetIsolate();
+ int decode_size = is_crankshafted()
+ ? static_cast<int>(safepoint_table_offset())
+ : instruction_size();
+ // If there might be a back edge table, stop before reaching it.
+ if (kind() == Code::FUNCTION) {
+ decode_size =
+ Min(decode_size, static_cast<int>(back_edge_table_offset()));
+ }
+ byte* begin = instruction_start();
+ byte* end = begin + decode_size;
+ Disassembler::Decode(isolate, &os, begin, end, this);
}
os << "\n";
@@ -11652,7 +11133,7 @@ static void EnqueueSpliceRecord(Handle<JSArray> object,
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_enqueue_splice()),
isolate->factory()->undefined_value(),
- ARRAY_SIZE(args),
+ arraysize(args),
args).Assert();
}
@@ -11665,7 +11146,7 @@ static void BeginPerformSplice(Handle<JSArray> object) {
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_begin_perform_splice()),
isolate->factory()->undefined_value(),
- ARRAY_SIZE(args),
+ arraysize(args),
args).Assert();
}
@@ -11678,7 +11159,7 @@ static void EndPerformSplice(Handle<JSArray> object) {
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_end_perform_splice()),
isolate->factory()->undefined_value(),
- ARRAY_SIZE(args),
+ arraysize(args),
args).Assert();
}
@@ -12067,7 +11548,7 @@ bool DependentCode::MarkCodeForDeoptimization(
if (is_code_at(i)) {
Code* code = code_at(i);
if (!code->marked_for_deoptimization()) {
- code->set_marked_for_deoptimization(true);
+ SetMarkedForDeoptimization(code, group);
marked = true;
}
} else {
@@ -12118,6 +11599,50 @@ void DependentCode::AddToDependentICList(Handle<Code> stub) {
}
+void DependentCode::SetMarkedForDeoptimization(Code* code,
+ DependencyGroup group) {
+ code->set_marked_for_deoptimization(true);
+ if (FLAG_trace_deopt &&
+ (code->deoptimization_data() != code->GetHeap()->empty_fixed_array())) {
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
+ PrintF(scope.file(), "[marking dependent code 0x%08" V8PRIxPTR
+ " (opt #%d) for deoptimization, reason: %s]\n",
+ reinterpret_cast<intptr_t>(code),
+ deopt_data->OptimizationId()->value(), DependencyGroupName(group));
+ }
+}
+
+
+const char* DependentCode::DependencyGroupName(DependencyGroup group) {
+ switch (group) {
+ case kWeakICGroup:
+ return "weak-ic";
+ case kWeakCodeGroup:
+ return "weak-code";
+ case kTransitionGroup:
+ return "transition";
+ case kPrototypeCheckGroup:
+ return "prototype-check";
+ case kElementsCantBeAddedGroup:
+ return "elements-cant-be-added";
+ case kPropertyCellChangedGroup:
+ return "property-cell-changed";
+ case kFieldTypeGroup:
+ return "field-type";
+ case kInitialMapChangedGroup:
+ return "initial-map-changed";
+ case kAllocationSiteTenuringChangedGroup:
+ return "allocation-site-tenuring-changed";
+ case kAllocationSiteTransitionChangedGroup:
+ return "allocation-site-transition-changed";
+ }
+ UNREACHABLE();
+ return "?";
+}
+
+
Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
Handle<Object> prototype) {
Handle<Map> new_map = GetPrototypeTransition(map, prototype);
@@ -12153,9 +11678,9 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// paragraph.
if (!object->map()->is_extensible()) {
Handle<Object> args[] = { object };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "non_extensible_proto", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("non_extensible_proto",
+ HandleVector(args, arraysize(args))),
+ Object);
}
// Before we can set the prototype we need to be sure
@@ -12167,9 +11692,9 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
!iter.IsAtEnd(); iter.Advance()) {
if (JSReceiver::cast(iter.GetCurrent()) == *object) {
// Cycle detected.
- Handle<Object> error = isolate->factory()->NewError(
- "cyclic_proto", HandleVector<Object>(NULL, 0));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate,
+ NewError("cyclic_proto", HandleVector<Object>(NULL, 0)),
+ Object);
}
}
@@ -12297,8 +11822,8 @@ MaybeHandle<Object> JSObject::GetElementWithCallback(
Handle<ExecutableAccessorInfo> data =
Handle<ExecutableAccessorInfo>::cast(structure);
Object* fun_obj = data->getter();
- v8::AccessorGetterCallback call_fun =
- v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
+ v8::AccessorNameGetterCallback call_fun =
+ v8::ToCData<v8::AccessorNameGetterCallback>(fun_obj);
if (call_fun == NULL) return isolate->factory()->undefined_value();
Handle<JSObject> holder_handle = Handle<JSObject>::cast(holder);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12355,8 +11880,8 @@ MaybeHandle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
Handle<ExecutableAccessorInfo> data =
Handle<ExecutableAccessorInfo>::cast(structure);
Object* call_obj = data->setter();
- v8::AccessorSetterCallback call_fun =
- v8::ToCData<v8::AccessorSetterCallback>(call_obj);
+ v8::AccessorNameSetterCallback call_fun =
+ v8::ToCData<v8::AccessorNameSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
@@ -12380,9 +11905,9 @@ MaybeHandle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
if (strict_mode == SLOPPY) return value;
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "no_setter_in_callback", HandleVector(args, 2));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError("no_setter_in_callback", HandleVector(args, 2)),
+ Object);
}
}
@@ -12591,10 +12116,9 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { number, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_read_only_property",
- HandleVector(args, 2));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+ HandleVector(args, 2)),
+ Object);
}
}
// Elements of the arguments object in slow mode might be slow aliases.
@@ -12629,10 +12153,9 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> name = isolate->factory()->NumberToString(number);
Handle<Object> args[1] = { name };
- Handle<Object> error =
- isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("object_not_extensible",
+ HandleVector(args, 1)),
+ Object);
}
}
@@ -12852,9 +12375,9 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[] = { object, number };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("redef_external_array_element",
+ HandleVector(args, arraysize(args))),
+ Object);
}
// Normalize the elements to enable attributes on the property.
@@ -13278,12 +12801,12 @@ void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
bool JSArray::IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
- Isolate* isolate = jsarray_map->GetIsolate();
- DCHECK(!jsarray_map->is_dictionary_map());
- LookupResult lookup(isolate);
- Handle<Name> length_string = isolate->factory()->length_string();
- jsarray_map->LookupDescriptor(NULL, *length_string, &lookup);
- return lookup.IsReadOnly();
+ Isolate* isolate = jsarray_map->GetIsolate();
+ DCHECK(!jsarray_map->is_dictionary_map());
+ LookupResult lookup(isolate);
+ Handle<Name> length_string = isolate->factory()->length_string();
+ jsarray_map->LookupDescriptor(NULL, *length_string, &lookup);
+ return lookup.IsReadOnly();
}
@@ -13292,11 +12815,12 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
uint32_t length = 0;
CHECK(array->length()->ToArrayIndex(&length));
if (length <= index) {
- Isolate* isolate = array->GetIsolate();
- LookupResult lookup(isolate);
- Handle<Name> length_string = isolate->factory()->length_string();
- array->LookupOwnRealNamedProperty(length_string, &lookup);
- return lookup.IsReadOnly();
+ LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+ CHECK(it.IsFound());
+ CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ return it.IsReadOnly();
}
return false;
}
@@ -13306,9 +12830,9 @@ MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
Isolate* isolate = array->GetIsolate();
Handle<Name> length = isolate->factory()->length_string();
Handle<Object> args[2] = { length, array };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw<Object>(error);
+ THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+ HandleVector(args, arraysize(args))),
+ Object);
}
@@ -13680,20 +13204,10 @@ MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> key) {
- Isolate* isolate = object->GetIsolate();
- SealHandleScope shs(isolate);
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>());
- return maybe(false);
- }
- }
-
- LookupResult result(isolate);
- object->LookupOwnRealNamedProperty(key, &result);
- return maybe(result.IsFound() && !result.IsInterceptor());
+ LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
+ if (!maybe_result.has_value) return Maybe<bool>();
+ return maybe(it.IsFound());
}
@@ -13728,20 +13242,10 @@ Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
Handle<Name> key) {
- Isolate* isolate = object->GetIsolate();
- SealHandleScope shs(isolate);
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>());
- return maybe(false);
- }
- }
-
- LookupResult result(isolate);
- object->LookupOwnRealNamedProperty(key, &result);
- return maybe(result.IsPropertyCallbacks());
+ LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
+ if (!maybe_result.has_value) return Maybe<bool>();
+ return maybe(it.state() == LookupIterator::ACCESSOR);
}
@@ -14058,7 +13562,7 @@ class StringSharedKey : public HashTableKey {
strict_mode_(strict_mode),
scope_position_(scope_position) { }
- bool IsMatch(Object* other) V8_OVERRIDE {
+ bool IsMatch(Object* other) OVERRIDE {
DisallowHeapAllocation no_allocation;
if (!other->IsFixedArray()) return false;
FixedArray* other_array = FixedArray::cast(other);
@@ -14093,12 +13597,12 @@ class StringSharedKey : public HashTableKey {
return hash;
}
- uint32_t Hash() V8_OVERRIDE {
+ uint32_t Hash() OVERRIDE {
return StringSharedHashHelper(*source_, *shared_, strict_mode_,
scope_position_);
}
- uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+ uint32_t HashForObject(Object* obj) OVERRIDE {
DisallowHeapAllocation no_allocation;
FixedArray* other_array = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
@@ -14112,7 +13616,7 @@ class StringSharedKey : public HashTableKey {
}
- Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
array->set(0, *shared_);
array->set(1, *source_);
@@ -14140,22 +13644,22 @@ class RegExpKey : public HashTableKey {
// stored value is stored where the key should be. IsMatch then
// compares the search key to the found object, rather than comparing
// a key to a key.
- bool IsMatch(Object* obj) V8_OVERRIDE {
+ bool IsMatch(Object* obj) OVERRIDE {
FixedArray* val = FixedArray::cast(obj);
return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
&& (flags_ == val->get(JSRegExp::kFlagsIndex));
}
- uint32_t Hash() V8_OVERRIDE { return RegExpHash(*string_, flags_); }
+ uint32_t Hash() OVERRIDE { return RegExpHash(*string_, flags_); }
- Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
// Plain hash maps, which is where regexp keys are used, don't
// use this function.
UNREACHABLE();
return MaybeHandle<Object>().ToHandleChecked();
}
- uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+ uint32_t HashForObject(Object* obj) OVERRIDE {
FixedArray* val = FixedArray::cast(obj);
return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
Smi::cast(val->get(JSRegExp::kFlagsIndex)));
@@ -14182,73 +13686,36 @@ Handle<Object> TwoByteStringKey::AsHandle(Isolate* isolate) {
}
-template<>
-const uint8_t* SubStringKey<uint8_t>::GetChars() {
- return string_->IsSeqOneByteString()
- ? SeqOneByteString::cast(*string_)->GetChars()
- : ExternalAsciiString::cast(*string_)->GetChars();
-}
-
-
-template<>
-const uint16_t* SubStringKey<uint16_t>::GetChars() {
- return string_->IsSeqTwoByteString()
- ? SeqTwoByteString::cast(*string_)->GetChars()
- : ExternalTwoByteString::cast(*string_)->GetChars();
-}
-
-
-template<>
-Handle<Object> SubStringKey<uint8_t>::AsHandle(Isolate* isolate) {
+Handle<Object> SeqOneByteSubStringKey::AsHandle(Isolate* isolate) {
if (hash_field_ == 0) Hash();
- Vector<const uint8_t> chars(GetChars() + from_, length_);
- return isolate->factory()->NewOneByteInternalizedString(chars, hash_field_);
+ return isolate->factory()->NewOneByteInternalizedSubString(
+ string_, from_, length_, hash_field_);
}
-template<>
-Handle<Object> SubStringKey<uint16_t>::AsHandle(Isolate* isolate) {
- if (hash_field_ == 0) Hash();
- Vector<const uint16_t> chars(GetChars() + from_, length_);
- return isolate->factory()->NewTwoByteInternalizedString(chars, hash_field_);
-}
-
-
-template<>
-bool SubStringKey<uint8_t>::IsMatch(Object* string) {
- Vector<const uint8_t> chars(GetChars() + from_, length_);
+bool SeqOneByteSubStringKey::IsMatch(Object* string) {
+ Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
return String::cast(string)->IsOneByteEqualTo(chars);
}
-template<>
-bool SubStringKey<uint16_t>::IsMatch(Object* string) {
- Vector<const uint16_t> chars(GetChars() + from_, length_);
- return String::cast(string)->IsTwoByteEqualTo(chars);
-}
-
-
-template class SubStringKey<uint8_t>;
-template class SubStringKey<uint16_t>;
-
-
// InternalizedStringKey carries a string/internalized-string object as key.
class InternalizedStringKey : public HashTableKey {
public:
explicit InternalizedStringKey(Handle<String> string)
: string_(string) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->Equals(*string_);
}
- virtual uint32_t Hash() V8_OVERRIDE { return string_->Hash(); }
+ virtual uint32_t Hash() OVERRIDE { return string_->Hash(); }
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
// Internalize the string if possible.
MaybeHandle<Map> maybe_map =
isolate->factory()->InternalizedStringMapForString(string_);
@@ -14292,7 +13759,7 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
MinimumCapacity capacity_option,
PretenureFlag pretenure) {
DCHECK(0 <= at_least_space_for);
- DCHECK(!capacity_option || IsPowerOf2(at_least_space_for));
+ DCHECK(!capacity_option || base::bits::IsPowerOfTwo32(at_least_space_for));
int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
? at_least_space_for
: ComputeCapacity(at_least_space_for);
@@ -15106,13 +14573,6 @@ Handle<Object> ExternalFloat64Array::SetValue(
}
-PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
- DCHECK(!HasFastProperties());
- Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- return PropertyCell::cast(value);
-}
-
-
Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
Handle<JSGlobalObject> global,
Handle<Name> name) {
@@ -15172,7 +14632,7 @@ class TwoCharHashTableKey : public HashTableKey {
#endif
}
- bool IsMatch(Object* o) V8_OVERRIDE {
+ bool IsMatch(Object* o) OVERRIDE {
if (!o->IsString()) return false;
String* other = String::cast(o);
if (other->length() != 2) return false;
@@ -15180,13 +14640,13 @@ class TwoCharHashTableKey : public HashTableKey {
return other->Get(1) == c2_;
}
- uint32_t Hash() V8_OVERRIDE { return hash_; }
- uint32_t HashForObject(Object* key) V8_OVERRIDE {
+ uint32_t Hash() OVERRIDE { return hash_; }
+ uint32_t HashForObject(Object* key) OVERRIDE {
if (!key->IsString()) return 0;
return String::cast(key)->Hash();
}
- Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
// The TwoCharHashTableKey is only used for looking in the string
// table, not for adding to it.
UNREACHABLE();
@@ -15384,7 +14844,7 @@ class StringsKey : public HashTableKey {
public:
explicit StringsKey(Handle<FixedArray> strings) : strings_(strings) { }
- bool IsMatch(Object* strings) V8_OVERRIDE {
+ bool IsMatch(Object* strings) OVERRIDE {
FixedArray* o = FixedArray::cast(strings);
int len = strings_->length();
if (o->length() != len) return false;
@@ -15394,9 +14854,9 @@ class StringsKey : public HashTableKey {
return true;
}
- uint32_t Hash() V8_OVERRIDE { return HashForObject(*strings_); }
+ uint32_t Hash() OVERRIDE { return HashForObject(*strings_); }
- uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+ uint32_t HashForObject(Object* obj) OVERRIDE {
FixedArray* strings = FixedArray::cast(obj);
int len = strings->length();
uint32_t hash = 0;
@@ -15406,7 +14866,7 @@ class StringsKey : public HashTableKey {
return hash;
}
- Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { return strings_; }
+ Handle<Object> AsHandle(Isolate* isolate) OVERRIDE { return strings_; }
private:
Handle<FixedArray> strings_;
@@ -15526,7 +14986,7 @@ Handle<Object> Dictionary<Derived, Shape, Key>::DeleteProperty(
Factory* factory = dictionary->GetIsolate()->factory();
PropertyDetails details = dictionary->DetailsAt(entry);
// Ignore attributes if forcing a deletion.
- if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
+ if (!details.IsConfigurable() && mode != JSReceiver::FORCE_DELETION) {
return factory->false_value();
}
@@ -15956,7 +15416,7 @@ Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Allocate(
// from number of buckets. If we decide to change kLoadFactor
// to something other than 2, capacity should be stored as another
// field of this object.
- capacity = RoundUpToPowerOf2(Max(kMinCapacity, capacity));
+ capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
if (capacity > kMaxCapacity) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
@@ -16312,7 +15772,7 @@ Smi* OrderedHashTableIterator<Derived, TableType>::Next(JSArray* value_array) {
FixedArray* array = FixedArray::cast(value_array->elements());
static_cast<Derived*>(this)->PopulateValueArray(array);
MoveNext();
- return kind();
+ return Smi::cast(kind());
}
return Smi::FromInt(0);
}
@@ -16926,16 +16386,4 @@ void PropertyCell::AddDependentCompilationInfo(Handle<PropertyCell> cell,
cell, info->zone());
}
-
-const char* GetBailoutReason(BailoutReason reason) {
- DCHECK(reason < kLastErrorMessage);
-#define ERROR_MESSAGES_TEXTS(C, T) T,
- static const char* error_messages_[] = {
- ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)
- };
-#undef ERROR_MESSAGES_TEXTS
- return error_messages_[reason];
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 2bb47e80f5..f2e17d34bb 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -7,6 +7,8 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
+#include "src/bailout-reason.h"
+#include "src/base/bits.h"
#include "src/builtins.h"
#include "src/checks.h"
#include "src/elements-kind.h"
@@ -78,6 +80,7 @@
// - OrderedHashSet
// - OrderedHashMap
// - Context
+// - TypeFeedbackVector
// - JSFunctionResultCache
// - ScopeInfo
// - TransitionArray
@@ -99,7 +102,7 @@
// - SlicedString
// - ConsString
// - ExternalString
-// - ExternalAsciiString
+// - ExternalOneByteString
// - ExternalTwoByteString
// - InternalizedString
// - SeqInternalizedString
@@ -107,7 +110,7 @@
// - SeqTwoByteInternalizedString
// - ConsInternalizedString
// - ExternalInternalizedString
-// - ExternalAsciiInternalizedString
+// - ExternalOneByteInternalizedString
// - ExternalTwoByteInternalizedString
// - Symbol
// - HeapNumber
@@ -318,7 +321,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
//
// The names of the string instance types are intended to systematically
// mirror their encoding in the instance_type field of the map. The default
-// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
+// encoding is considered TWO_BYTE. It is not mentioned in the name. ONE_BYTE
// encoding is mentioned explicitly in the name. Likewise, the default
// representation is considered sequential. It is not mentioned in the
// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
@@ -333,206 +336,172 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V) \
- V(STRING_TYPE) \
- V(ASCII_STRING_TYPE) \
- V(CONS_STRING_TYPE) \
- V(CONS_ASCII_STRING_TYPE) \
- V(SLICED_STRING_TYPE) \
- V(SLICED_ASCII_STRING_TYPE) \
- V(EXTERNAL_STRING_TYPE) \
- V(EXTERNAL_ASCII_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- \
- V(INTERNALIZED_STRING_TYPE) \
- V(ASCII_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- \
- V(SYMBOL_TYPE) \
- \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(ODDBALL_TYPE) \
- V(CELL_TYPE) \
- V(PROPERTY_CELL_TYPE) \
- \
- V(HEAP_NUMBER_TYPE) \
- V(MUTABLE_HEAP_NUMBER_TYPE) \
- V(FOREIGN_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
- /* Note: the order of these external array */ \
- /* types is relied upon in */ \
- /* Object::IsExternalArray(). */ \
- V(EXTERNAL_INT8_ARRAY_TYPE) \
- V(EXTERNAL_UINT8_ARRAY_TYPE) \
- V(EXTERNAL_INT16_ARRAY_TYPE) \
- V(EXTERNAL_UINT16_ARRAY_TYPE) \
- V(EXTERNAL_INT32_ARRAY_TYPE) \
- V(EXTERNAL_UINT32_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
- V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
- \
- V(FIXED_INT8_ARRAY_TYPE) \
- V(FIXED_UINT8_ARRAY_TYPE) \
- V(FIXED_INT16_ARRAY_TYPE) \
- V(FIXED_UINT16_ARRAY_TYPE) \
- V(FIXED_INT32_ARRAY_TYPE) \
- V(FIXED_UINT32_ARRAY_TYPE) \
- V(FIXED_FLOAT32_ARRAY_TYPE) \
- V(FIXED_FLOAT64_ARRAY_TYPE) \
- V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
- \
- V(FILLER_TYPE) \
- \
- V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
- V(DECLARED_ACCESSOR_INFO_TYPE) \
- V(EXECUTABLE_ACCESSOR_INFO_TYPE) \
- V(ACCESSOR_PAIR_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(CALL_HANDLER_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(SIGNATURE_INFO_TYPE) \
- V(TYPE_SWITCH_INFO_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(ALLOCATION_SITE_TYPE) \
- V(SCRIPT_TYPE) \
- V(CODE_CACHE_TYPE) \
- V(POLYMORPHIC_CODE_CACHE_TYPE) \
- V(TYPE_FEEDBACK_INFO_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(BOX_TYPE) \
- \
- V(FIXED_ARRAY_TYPE) \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(CONSTANT_POOL_ARRAY_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- \
- V(JS_MESSAGE_OBJECT_TYPE) \
- \
- V(JS_VALUE_TYPE) \
- V(JS_DATE_TYPE) \
- V(JS_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_MODULE_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
- V(JS_DATA_VIEW_TYPE) \
- V(JS_PROXY_TYPE) \
- V(JS_SET_TYPE) \
- V(JS_MAP_TYPE) \
- V(JS_SET_ITERATOR_TYPE) \
- V(JS_MAP_ITERATOR_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
- V(JS_REGEXP_TYPE) \
- \
- V(JS_FUNCTION_TYPE) \
- V(JS_FUNCTION_PROXY_TYPE) \
- V(DEBUG_INFO_TYPE) \
+#define INSTANCE_TYPE_LIST(V) \
+ V(STRING_TYPE) \
+ V(ONE_BYTE_STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(CONS_ONE_BYTE_STRING_TYPE) \
+ V(SLICED_STRING_TYPE) \
+ V(SLICED_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ \
+ V(INTERNALIZED_STRING_TYPE) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(CODE_TYPE) \
+ V(ODDBALL_TYPE) \
+ V(CELL_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
+ \
+ V(HEAP_NUMBER_TYPE) \
+ V(MUTABLE_HEAP_NUMBER_TYPE) \
+ V(FOREIGN_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(FREE_SPACE_TYPE) \
+ /* Note: the order of these external array */ \
+ /* types is relied upon in */ \
+ /* Object::IsExternalArray(). */ \
+ V(EXTERNAL_INT8_ARRAY_TYPE) \
+ V(EXTERNAL_UINT8_ARRAY_TYPE) \
+ V(EXTERNAL_INT16_ARRAY_TYPE) \
+ V(EXTERNAL_UINT16_ARRAY_TYPE) \
+ V(EXTERNAL_INT32_ARRAY_TYPE) \
+ V(EXTERNAL_UINT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
+ V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
+ V(FIXED_INT8_ARRAY_TYPE) \
+ V(FIXED_UINT8_ARRAY_TYPE) \
+ V(FIXED_INT16_ARRAY_TYPE) \
+ V(FIXED_UINT16_ARRAY_TYPE) \
+ V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_UINT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
+ V(FILLER_TYPE) \
+ \
+ V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
+ V(DECLARED_ACCESSOR_INFO_TYPE) \
+ V(EXECUTABLE_ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_PAIR_TYPE) \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(CALL_HANDLER_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(SIGNATURE_INFO_TYPE) \
+ V(TYPE_SWITCH_INFO_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ALLOCATION_SITE_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(CODE_CACHE_TYPE) \
+ V(POLYMORPHIC_CODE_CACHE_TYPE) \
+ V(TYPE_FEEDBACK_INFO_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(BOX_TYPE) \
+ \
+ V(FIXED_ARRAY_TYPE) \
+ V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(CONSTANT_POOL_ARRAY_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ \
+ V(JS_MESSAGE_OBJECT_TYPE) \
+ \
+ V(JS_VALUE_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
+ V(JS_MODULE_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_BUILTINS_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE) \
+ V(JS_PROXY_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_MAP_TYPE) \
+ V(JS_SET_ITERATOR_TYPE) \
+ V(JS_MAP_ITERATOR_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ \
+ V(JS_FUNCTION_TYPE) \
+ V(JS_FUNCTION_PROXY_TYPE) \
+ V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
// Since string types are not consecutive, this macro is used to
// iterate over them.
-#define STRING_TYPE_LIST(V) \
- V(STRING_TYPE, \
- kVariableSizeSentinel, \
- string, \
- String) \
- V(ASCII_STRING_TYPE, \
- kVariableSizeSentinel, \
- ascii_string, \
- AsciiString) \
- V(CONS_STRING_TYPE, \
- ConsString::kSize, \
- cons_string, \
- ConsString) \
- V(CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- cons_ascii_string, \
- ConsAsciiString) \
- V(SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- sliced_string, \
- SlicedString) \
- V(SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- sliced_ascii_string, \
- SlicedAsciiString) \
- V(EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string, \
- ExternalString) \
- V(EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_string, \
- ExternalAsciiString) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string_with_one_byte_data, \
- ExternalStringWithOneByteData) \
- V(SHORT_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string, \
- ShortExternalString) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kShortSize, \
- short_external_ascii_string, \
- ShortExternalAsciiString) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string_with_one_byte_data, \
- ShortExternalStringWithOneByteData) \
- \
- V(INTERNALIZED_STRING_TYPE, \
- kVariableSizeSentinel, \
- internalized_string, \
- InternalizedString) \
- V(ASCII_INTERNALIZED_STRING_TYPE, \
- kVariableSizeSentinel, \
- ascii_internalized_string, \
- AsciiInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string, \
- ExternalInternalizedString) \
- V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_internalized_string, \
- ExternalAsciiInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string_with_one_byte_data, \
- ExternalInternalizedStringWithOneByteData) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_internalized_string, \
- ShortExternalInternalizedString) \
- V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
- ExternalAsciiString::kShortSize, \
- short_external_ascii_internalized_string, \
- ShortExternalAsciiInternalizedString) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_internalized_string_with_one_byte_data, \
- ShortExternalInternalizedStringWithOneByteData) \
+#define STRING_TYPE_LIST(V) \
+ V(STRING_TYPE, kVariableSizeSentinel, string, String) \
+ V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
+ OneByteString) \
+ V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \
+ V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \
+ ConsOneByteString) \
+ V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \
+ V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
+ SlicedOneByteString) \
+ V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \
+ ExternalString) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_string, ExternalOneByteString) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \
+ external_string_with_one_byte_data, ExternalStringWithOneByteData) \
+ V(SHORT_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kShortSize, \
+ short_external_string, ShortExternalString) \
+ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kShortSize, \
+ short_external_one_byte_string, ShortExternalOneByteString) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_string_with_one_byte_data, \
+ ShortExternalStringWithOneByteData) \
+ \
+ V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
+ InternalizedString) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
+ one_byte_internalized_string, OneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
+ external_internalized_string, ExternalInternalizedString) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string_with_one_byte_data, \
+ ExternalInternalizedStringWithOneByteData) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kShortSize, short_external_internalized_string, \
+ ShortExternalInternalizedString) \
+ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
+ ExternalOneByteString::kShortSize, \
+ short_external_one_byte_internalized_string, \
+ ShortExternalOneByteInternalizedString) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_internalized_string_with_one_byte_data, \
+ ShortExternalInternalizedStringWithOneByteData)
// A struct is a simple object a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -640,51 +609,50 @@ static inline bool IsShortcutCandidate(int type) {
enum InstanceType {
// String types.
- INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag
- | kInternalizedTag,
- ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag
- | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag
- | kInternalizedTag,
- EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag
- | kExternalStringTag | kInternalizedTag,
+ INTERNALIZED_STRING_TYPE =
+ kTwoByteStringTag | kSeqStringTag | kInternalizedTag,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ kOneByteStringTag | kSeqStringTag | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE =
+ kTwoByteStringTag | kExternalStringTag | kInternalizedTag,
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ kOneByteStringTag | kExternalStringTag | kInternalizedTag,
EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag
- | kInternalizedTag,
- SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
- | kInternalizedTag,
- SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
- EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
- | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag |
+ kInternalizedTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_INTERNALIZED_STRING_TYPE |
+ kShortExternalStringTag |
+ kInternalizedTag,
+ SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kShortExternalStringTag |
+ kInternalizedTag,
SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
- | kShortExternalStringTag | kInternalizedTag,
-
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ kShortExternalStringTag | kInternalizedTag,
STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- ASCII_STRING_TYPE = ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ ONE_BYTE_STRING_TYPE =
+ ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag,
- CONS_ASCII_STRING_TYPE =
+ CONS_ONE_BYTE_STRING_TYPE =
kOneByteStringTag | kConsStringTag | kNotInternalizedTag,
-
SLICED_STRING_TYPE =
kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
- SLICED_ASCII_STRING_TYPE =
+ SLICED_ONE_BYTE_STRING_TYPE =
kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag,
EXTERNAL_STRING_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- EXTERNAL_ASCII_STRING_TYPE =
- EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_ONE_BYTE_STRING_TYPE =
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
- | kNotInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ kNotInternalizedTag,
SHORT_EXTERNAL_STRING_TYPE =
SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- SHORT_EXTERNAL_ASCII_STRING_TYPE =
- SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE =
+ SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
- | kNotInternalizedTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ kNotInternalizedTag,
// Non-string names
SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
@@ -703,7 +671,6 @@ enum InstanceType {
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
FREE_SPACE_TYPE,
-
EXTERNAL_INT8_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UINT8_ARRAY_TYPE,
EXTERNAL_INT16_ARRAY_TYPE,
@@ -713,8 +680,7 @@ enum InstanceType {
EXTERNAL_FLOAT32_ARRAY_TYPE,
EXTERNAL_FLOAT64_ARRAY_TYPE,
EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
-
- FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
FIXED_UINT8_ARRAY_TYPE,
FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE,
@@ -723,7 +689,6 @@ enum InstanceType {
FIXED_FLOAT32_ARRAY_TYPE,
FIXED_FLOAT64_ARRAY_TYPE,
FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
-
FIXED_DOUBLE_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
@@ -749,7 +714,6 @@ enum InstanceType {
BOX_TYPE,
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
-
FIXED_ARRAY_TYPE,
CONSTANT_POOL_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -760,9 +724,8 @@ enum InstanceType {
// compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
// NONCALLABLE_JS_OBJECT range.
JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
- JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
-
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
+ JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_MESSAGE_OBJECT_TYPE,
JS_DATE_TYPE,
JS_OBJECT_TYPE,
@@ -782,9 +745,7 @@ enum InstanceType {
JS_MAP_ITERATOR_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
-
JS_REGEXP_TYPE,
-
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
// Pseudo-types
@@ -891,6 +852,7 @@ class GlobalObject;
class ObjectVisitor;
class LookupIterator;
class StringStream;
+class TypeFeedbackVector;
// We cannot just say "class HeapType;" if it is created from a template... =8-?
template<class> class TypeImpl;
struct HeapTypeConfig;
@@ -913,455 +875,115 @@ template <class C> inline bool Is(Object* obj);
#endif
-#define OBJECT_TYPE_LIST(V) \
- V(Smi) \
- V(HeapObject) \
- V(Number) \
-
-#define HEAP_OBJECT_TYPE_LIST(V) \
- V(HeapNumber) \
- V(MutableHeapNumber) \
- V(Name) \
- V(UniqueName) \
- V(String) \
- V(SeqString) \
- V(ExternalString) \
- V(ConsString) \
- V(SlicedString) \
- V(ExternalTwoByteString) \
- V(ExternalAsciiString) \
- V(SeqTwoByteString) \
- V(SeqOneByteString) \
- V(InternalizedString) \
- V(Symbol) \
- \
- V(ExternalArray) \
- V(ExternalInt8Array) \
- V(ExternalUint8Array) \
- V(ExternalInt16Array) \
- V(ExternalUint16Array) \
- V(ExternalInt32Array) \
- V(ExternalUint32Array) \
- V(ExternalFloat32Array) \
- V(ExternalFloat64Array) \
- V(ExternalUint8ClampedArray) \
- V(FixedTypedArrayBase) \
- V(FixedUint8Array) \
- V(FixedInt8Array) \
- V(FixedUint16Array) \
- V(FixedInt16Array) \
- V(FixedUint32Array) \
- V(FixedInt32Array) \
- V(FixedFloat32Array) \
- V(FixedFloat64Array) \
- V(FixedUint8ClampedArray) \
- V(ByteArray) \
- V(FreeSpace) \
- V(JSReceiver) \
- V(JSObject) \
- V(JSContextExtensionObject) \
- V(JSGeneratorObject) \
- V(JSModule) \
- V(Map) \
- V(DescriptorArray) \
- V(TransitionArray) \
- V(DeoptimizationInputData) \
- V(DeoptimizationOutputData) \
- V(DependentCode) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(ConstantPoolArray) \
- V(Context) \
- V(NativeContext) \
- V(ScopeInfo) \
- V(JSFunction) \
- V(Code) \
- V(Oddball) \
- V(SharedFunctionInfo) \
- V(JSValue) \
- V(JSDate) \
- V(JSMessageObject) \
- V(StringWrapper) \
- V(Foreign) \
- V(Boolean) \
- V(JSArray) \
- V(JSArrayBuffer) \
- V(JSArrayBufferView) \
- V(JSTypedArray) \
- V(JSDataView) \
- V(JSProxy) \
- V(JSFunctionProxy) \
- V(JSSet) \
- V(JSMap) \
- V(JSSetIterator) \
- V(JSMapIterator) \
- V(JSWeakCollection) \
- V(JSWeakMap) \
- V(JSWeakSet) \
- V(JSRegExp) \
- V(HashTable) \
- V(Dictionary) \
- V(StringTable) \
- V(JSFunctionResultCache) \
- V(NormalizedMapCache) \
- V(CompilationCacheTable) \
- V(CodeCacheHashTable) \
- V(PolymorphicCodeCacheHashTable) \
- V(MapCache) \
- V(Primitive) \
- V(GlobalObject) \
- V(JSGlobalObject) \
- V(JSBuiltinsObject) \
- V(JSGlobalProxy) \
- V(UndetectableObject) \
- V(AccessCheckNeeded) \
- V(Cell) \
- V(PropertyCell) \
- V(ObjectHashTable) \
- V(WeakHashTable) \
+#define OBJECT_TYPE_LIST(V) \
+ V(Smi) \
+ V(HeapObject) \
+ V(Number)
+
+#define HEAP_OBJECT_TYPE_LIST(V) \
+ V(HeapNumber) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(UniqueName) \
+ V(String) \
+ V(SeqString) \
+ V(ExternalString) \
+ V(ConsString) \
+ V(SlicedString) \
+ V(ExternalTwoByteString) \
+ V(ExternalOneByteString) \
+ V(SeqTwoByteString) \
+ V(SeqOneByteString) \
+ V(InternalizedString) \
+ V(Symbol) \
+ \
+ V(ExternalArray) \
+ V(ExternalInt8Array) \
+ V(ExternalUint8Array) \
+ V(ExternalInt16Array) \
+ V(ExternalUint16Array) \
+ V(ExternalInt32Array) \
+ V(ExternalUint32Array) \
+ V(ExternalFloat32Array) \
+ V(ExternalFloat64Array) \
+ V(ExternalUint8ClampedArray) \
+ V(FixedTypedArrayBase) \
+ V(FixedUint8Array) \
+ V(FixedInt8Array) \
+ V(FixedUint16Array) \
+ V(FixedInt16Array) \
+ V(FixedUint32Array) \
+ V(FixedInt32Array) \
+ V(FixedFloat32Array) \
+ V(FixedFloat64Array) \
+ V(FixedUint8ClampedArray) \
+ V(ByteArray) \
+ V(FreeSpace) \
+ V(JSReceiver) \
+ V(JSObject) \
+ V(JSContextExtensionObject) \
+ V(JSGeneratorObject) \
+ V(JSModule) \
+ V(Map) \
+ V(DescriptorArray) \
+ V(TransitionArray) \
+ V(TypeFeedbackVector) \
+ V(DeoptimizationInputData) \
+ V(DeoptimizationOutputData) \
+ V(DependentCode) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
+ V(Context) \
+ V(NativeContext) \
+ V(ScopeInfo) \
+ V(JSFunction) \
+ V(Code) \
+ V(Oddball) \
+ V(SharedFunctionInfo) \
+ V(JSValue) \
+ V(JSDate) \
+ V(JSMessageObject) \
+ V(StringWrapper) \
+ V(Foreign) \
+ V(Boolean) \
+ V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSTypedArray) \
+ V(JSDataView) \
+ V(JSProxy) \
+ V(JSFunctionProxy) \
+ V(JSSet) \
+ V(JSMap) \
+ V(JSSetIterator) \
+ V(JSMapIterator) \
+ V(JSWeakCollection) \
+ V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(JSRegExp) \
+ V(HashTable) \
+ V(Dictionary) \
+ V(StringTable) \
+ V(JSFunctionResultCache) \
+ V(NormalizedMapCache) \
+ V(CompilationCacheTable) \
+ V(CodeCacheHashTable) \
+ V(PolymorphicCodeCacheHashTable) \
+ V(MapCache) \
+ V(Primitive) \
+ V(GlobalObject) \
+ V(JSGlobalObject) \
+ V(JSBuiltinsObject) \
+ V(JSGlobalProxy) \
+ V(UndetectableObject) \
+ V(AccessCheckNeeded) \
+ V(Cell) \
+ V(PropertyCell) \
+ V(ObjectHashTable) \
+ V(WeakHashTable) \
V(OrderedHashTable)
-
-#define ERROR_MESSAGES_LIST(V) \
- V(kNoReason, "no reason") \
- \
- V(k32BitValueInRegisterIsNotZeroExtended, \
- "32 bit value in register is not zero-extended") \
- V(kAlignmentMarkerExpected, "Alignment marker expected") \
- V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
- V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
- V(kArgumentsObjectValueInATestContext, \
- "Arguments object value in a test context") \
- V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed") \
- V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
- V(kAssignmentToArguments, "Assignment to arguments") \
- V(kAssignmentToLetVariableBeforeInitialization, \
- "Assignment to let variable before initialization") \
- V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
- V(kAssignmentToParameterFunctionUsesArgumentsObject, \
- "Assignment to parameter, function uses arguments object") \
- V(kAssignmentToParameterInArgumentsObject, \
- "Assignment to parameter in arguments object") \
- V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
- V(kBadValueContextForArgumentsObjectValue, \
- "Bad value context for arguments object value") \
- V(kBadValueContextForArgumentsValue, \
- "Bad value context for arguments value") \
- V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
- V(kBailoutWasNotPrepared, "Bailout was not prepared") \
- V(kBinaryStubGenerateFloatingPointCode, \
- "BinaryStub_GenerateFloatingPointCode") \
- V(kBothRegistersWereSmisInSelectNonSmi, \
- "Both registers were smis in SelectNonSmi") \
- V(kCallToAJavaScriptRuntimeFunction, \
- "Call to a JavaScript runtime function") \
- V(kCannotTranslatePositionInChangedArea, \
- "Cannot translate position in changed area") \
- V(kCodeGenerationFailed, "Code generation failed") \
- V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
- V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
- V(kContextAllocatedArguments, "Context-allocated arguments") \
- V(kCopyBuffersOverlap, "Copy buffers overlap") \
- V(kCouldNotGenerateZero, "Could not generate +0.0") \
- V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
- V(kDebuggerHasBreakPoints, "Debugger has break points") \
- V(kDebuggerStatement, "DebuggerStatement") \
- V(kDeclarationInCatchContext, "Declaration in catch context") \
- V(kDeclarationInWithContext, "Declaration in with context") \
- V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
- V(kDeleteWithGlobalVariable, "Delete with global variable") \
- V(kDeleteWithNonGlobalVariable, "Delete with non-global variable") \
- V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
- V(kDontDeleteCellsCannotContainTheHole, \
- "DontDelete cells can't contain the hole") \
- V(kDoPushArgumentNotImplementedForDoubleType, \
- "DoPushArgument not implemented for double type") \
- V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
- V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
- "EmitLoadRegister: Unsupported double immediate") \
- V(kEval, "eval") \
- V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "Expected alignment marker") \
- V(kExpectedAllocationSite, "Expected allocation site") \
- V(kExpectedFunctionObject, "Expected function object in register") \
- V(kExpectedHeapNumber, "Expected HeapNumber") \
- V(kExpectedNativeContext, "Expected native context") \
- V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
- V(kExpectedNonNullContext, "Expected non-null context") \
- V(kExpectedPositiveZero, "Expected +0.0") \
- V(kExpectedAllocationSiteInCell, \
- "Expected AllocationSite in property cell") \
- V(kExpectedFixedArrayInFeedbackVector, \
- "Expected fixed array in feedback vector") \
- V(kExpectedFixedArrayInRegisterA2, \
- "Expected fixed array in register a2") \
- V(kExpectedFixedArrayInRegisterEbx, \
- "Expected fixed array in register ebx") \
- V(kExpectedFixedArrayInRegisterR2, \
- "Expected fixed array in register r2") \
- V(kExpectedFixedArrayInRegisterRbx, \
- "Expected fixed array in register rbx") \
- V(kExpectedNewSpaceObject, "Expected new space object") \
- V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
- V(kExpectedUndefinedOrCell, \
- "Expected undefined or cell in register") \
- V(kExpectingAlignmentForCopyBytes, \
- "Expecting alignment for CopyBytes") \
- V(kExportDeclaration, "Export declaration") \
- V(kExternalStringExpectedButNotFound, \
- "External string expected, but not found") \
- V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
- V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
- V(kForInStatementOptimizationIsDisabled, \
- "ForInStatement optimization is disabled") \
- V(kForInStatementWithNonLocalEachVariable, \
- "ForInStatement with non-local each variable") \
- V(kForOfStatement, "ForOfStatement") \
- V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
- V(kFunctionCallsEval, "Function calls eval") \
- V(kFunctionIsAGenerator, "Function is a generator") \
- V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
- V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
- V(kGeneratorFailedToResume, "Generator failed to resume") \
- V(kGenerator, "Generator") \
- V(kGlobalFunctionsMustHaveInitialMap, \
- "Global functions must have initial map") \
- V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
- V(kHydrogenFilter, "Optimization disabled by filter") \
- V(kImportDeclaration, "Import declaration") \
- V(kImproperObjectOnPrototypeChainForStore, \
- "Improper object on prototype chain for store") \
- V(kIndexIsNegative, "Index is negative") \
- V(kIndexIsTooLarge, "Index is too large") \
- V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
- V(kInlinedRuntimeFunctionFastAsciiArrayJoin, \
- "Inlined runtime function: FastAsciiArrayJoin") \
- V(kInlinedRuntimeFunctionGeneratorNext, \
- "Inlined runtime function: GeneratorNext") \
- V(kInlinedRuntimeFunctionGeneratorThrow, \
- "Inlined runtime function: GeneratorThrow") \
- V(kInlinedRuntimeFunctionGetFromCache, \
- "Inlined runtime function: GetFromCache") \
- V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
- "Inlined runtime function: IsNonNegativeSmi") \
- V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
- "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
- V(kInliningBailedOut, "Inlining bailed out") \
- V(kInputGPRIsExpectedToHaveUpper32Cleared, \
- "Input GPR is expected to have upper32 cleared") \
- V(kInputStringTooLong, "Input string too long") \
- V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
- "InstanceofStub unexpected call site cache (check)") \
- V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
- "InstanceofStub unexpected call site cache (cmp 1)") \
- V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
- "InstanceofStub unexpected call site cache (cmp 2)") \
- V(kInstanceofStubUnexpectedCallSiteCacheMov, \
- "InstanceofStub unexpected call site cache (mov)") \
- V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
- "Integer32ToSmiField writing to non-smi location") \
- V(kInvalidCaptureReferenced, "Invalid capture referenced") \
- V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
- "Invalid ElementsKind for InternalArray or InternalPackedArray") \
- V(kInvalidFullCodegenState, "invalid full-codegen state") \
- V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
- V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
- V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
- V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
- V(kInvalidMinLength, "Invalid min_length") \
- V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
- "JSGlobalObject::native_context should be a native context") \
- V(kJSGlobalProxyContextShouldNotBeNull, \
- "JSGlobalProxy::context() should not be null") \
- V(kJSObjectWithFastElementsMapHasSlowElements, \
- "JSObject with fast elements map has slow elements") \
- V(kLetBindingReInitialization, "Let binding re-initialization") \
- V(kLhsHasBeenClobbered, "lhs has been clobbered") \
- V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
- V(kLiveEdit, "LiveEdit") \
- V(kLookupVariableInCountOperation, \
- "Lookup variable in count operation") \
- V(kMapBecameDeprecated, "Map became deprecated") \
- V(kMapBecameUnstable, "Map became unstable") \
- V(kMapIsNoLongerInEax, "Map is no longer in eax") \
- V(kModuleDeclaration, "Module declaration") \
- V(kModuleLiteral, "Module literal") \
- V(kModulePath, "Module path") \
- V(kModuleStatement, "Module statement") \
- V(kModuleVariable, "Module variable") \
- V(kModuleUrl, "Module url") \
- V(kNativeFunctionLiteral, "Native function literal") \
- V(kNeedSmiLiteral, "Need a Smi literal here") \
- V(kNoCasesLeft, "No cases left") \
- V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
- "No empty arrays here in EmitFastAsciiArrayJoin") \
- V(kNonInitializerAssignmentToConst, \
- "Non-initializer assignment to const") \
- V(kNonSmiIndex, "Non-smi index") \
- V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
- V(kNonSmiValue, "Non-smi value") \
- V(kNonObject, "Non-object value") \
- V(kNotEnoughVirtualRegistersForValues, \
- "Not enough virtual registers for values") \
- V(kNotEnoughSpillSlotsForOsr, \
- "Not enough spill slots for OSR") \
- V(kNotEnoughVirtualRegistersRegalloc, \
- "Not enough virtual registers (regalloc)") \
- V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
- V(kObjectLiteralWithComplexProperty, \
- "Object literal with complex property") \
- V(kOddballInStringTableIsNotUndefinedOrTheHole, \
- "Oddball in string table is not undefined or the hole") \
- V(kOffsetOutOfRange, "Offset out of range") \
- V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
- V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
- V(kOperandIsASmi, "Operand is a smi") \
- V(kOperandIsNotAName, "Operand is not a name") \
- V(kOperandIsNotANumber, "Operand is not a number") \
- V(kOperandIsNotASmi, "Operand is not a smi") \
- V(kOperandIsNotAString, "Operand is not a string") \
- V(kOperandIsNotSmi, "Operand is not smi") \
- V(kOperandNotANumber, "Operand not a number") \
- V(kObjectTagged, "The object is tagged") \
- V(kObjectNotTagged, "The object is not tagged") \
- V(kOptimizationDisabled, "Optimization is disabled") \
- V(kOptimizedTooManyTimes, "Optimized too many times") \
- V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
- "Out of virtual registers while trying to allocate temp register") \
- V(kParseScopeError, "Parse/scope error") \
- V(kPossibleDirectCallToEval, "Possible direct call to eval") \
- V(kPreconditionsWereNotMet, "Preconditions were not met") \
- V(kPropertyAllocationCountFailed, "Property allocation count failed") \
- V(kReceivedInvalidReturnAddress, "Received invalid return address") \
- V(kReferenceToAVariableWhichRequiresDynamicLookup, \
- "Reference to a variable which requires dynamic lookup") \
- V(kReferenceToGlobalLexicalVariable, \
- "Reference to global lexical variable") \
- V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
- V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
- V(kRegisterWasClobbered, "Register was clobbered") \
- V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
- V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
- V(kScopedBlock, "ScopedBlock") \
- V(kSmiAdditionOverflow, "Smi addition overflow") \
- V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
- V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
- V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
- "SwitchStatement: mixed or non-literal switch labels") \
- V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
- V(kTheCurrentStackPointerIsBelowCsp, \
- "The current stack pointer is below csp") \
- V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
- V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
- V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
- "The instruction to patch should be a load from the constant pool") \
- V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
- "The instruction to patch should be a ldr literal") \
- V(kTheInstructionToPatchShouldBeALui, \
- "The instruction to patch should be a lui") \
- V(kTheInstructionToPatchShouldBeAnOri, \
- "The instruction to patch should be an ori") \
- V(kTheSourceAndDestinationAreTheSame, \
- "The source and destination are the same") \
- V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
- V(kTheStackWasCorruptedByMacroAssemblerCall, \
- "The stack was corrupted by MacroAssembler::Call()") \
- V(kTooManyParametersLocals, "Too many parameters/locals") \
- V(kTooManyParameters, "Too many parameters") \
- V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
- V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
- V(kToOperandIsDoubleRegisterUnimplemented, \
- "ToOperand IsDoubleRegister unimplemented") \
- V(kToOperandUnsupportedDoubleImmediate, \
- "ToOperand Unsupported double immediate") \
- V(kTryCatchStatement, "TryCatchStatement") \
- V(kTryFinallyStatement, "TryFinallyStatement") \
- V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
- V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
- V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
- V(kUndoAllocationOfNonAllocatedMemory, \
- "Undo allocation of non allocated memory") \
- V(kUnexpectedAllocationTop, "Unexpected allocation top") \
- V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
- V(kUnexpectedElementsKindInArrayConstructor, \
- "Unexpected ElementsKind in array constructor") \
- V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
- "Unexpected fallthrough from CharCodeAt slow case") \
- V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
- "Unexpected fallthrough from CharFromCode slow case") \
- V(kUnexpectedFallThroughFromStringComparison, \
- "Unexpected fall-through from string comparison") \
- V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode, \
- "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode") \
- V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
- "Unexpected fallthrough to CharCodeAt slow case") \
- V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
- "Unexpected fallthrough to CharFromCode slow case") \
- V(kUnexpectedFPUStackDepthAfterInstruction, \
- "Unexpected FPU stack depth after instruction") \
- V(kUnexpectedInitialMapForArrayFunction1, \
- "Unexpected initial map for Array function (1)") \
- V(kUnexpectedInitialMapForArrayFunction2, \
- "Unexpected initial map for Array function (2)") \
- V(kUnexpectedInitialMapForArrayFunction, \
- "Unexpected initial map for Array function") \
- V(kUnexpectedInitialMapForInternalArrayFunction, \
- "Unexpected initial map for InternalArray function") \
- V(kUnexpectedLevelAfterReturnFromApiCall, \
- "Unexpected level after return from api call") \
- V(kUnexpectedNegativeValue, "Unexpected negative value") \
- V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
- "Unexpected number of pre-allocated property fields") \
- V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
- V(kUnexpectedSmi, "Unexpected smi value") \
- V(kUnexpectedStringFunction, "Unexpected String function") \
- V(kUnexpectedStringType, "Unexpected string type") \
- V(kUnexpectedStringWrapperInstanceSize, \
- "Unexpected string wrapper instance size") \
- V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
- "Unexpected type for RegExp data, FixedArray expected") \
- V(kUnexpectedValue, "Unexpected value") \
- V(kUnexpectedUnusedPropertiesOfStringWrapper, \
- "Unexpected unused properties of string wrapper") \
- V(kUnimplemented, "unimplemented") \
- V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
- V(kUnknown, "Unknown") \
- V(kUnsupportedConstCompoundAssignment, \
- "Unsupported const compound assignment") \
- V(kUnsupportedCountOperationWithConst, \
- "Unsupported count operation with const") \
- V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
- V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
- V(kUnsupportedLookupSlotInDeclaration, \
- "Unsupported lookup slot in declaration") \
- V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
- V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
- V(kUnsupportedPhiUseOfConstVariable, \
- "Unsupported phi use of const variable") \
- V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
- V(kVariableResolvedToWithContext, "Variable resolved to with context") \
- V(kWeShouldNotHaveAnEmptyLexicalContext, \
- "We should not have an empty lexical context") \
- V(kWithStatement, "WithStatement") \
- V(kWrongAddressOrValuePassedToRecordWrite, \
- "Wrong address or value passed to RecordWrite") \
- V(kYield, "Yield")
-
-
-#define ERROR_MESSAGES_CONSTANTS(C, T) C,
-enum BailoutReason {
- ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
- kLastErrorMessage
-};
-#undef ERROR_MESSAGES_CONSTANTS
-
-
-const char* GetBailoutReason(BailoutReason reason);
-
-
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -1385,6 +1007,8 @@ class Object {
CERTAINLY_NOT_STORE_FROM_KEYED
};
+ enum StorePropertyMode { NORMAL_PROPERTY, SUPER_PROPERTY };
+
INLINE(bool IsFixedArrayBase() const);
INLINE(bool IsExternal() const);
INLINE(bool IsAccessorInfo() const);
@@ -1484,8 +1108,6 @@ class Object {
static MUST_USE_RESULT inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
Handle<Object> object);
- void Lookup(Handle<Name> name, LookupResult* result);
-
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
// Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
@@ -1496,11 +1118,12 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
LookupIterator* it, Handle<Object> value, StrictMode strict_mode,
- StoreFromKeyed store_mode);
+ StoreFromKeyed store_mode,
+ StorePropertyMode data_store_mode = NORMAL_PROPERTY);
MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
LookupIterator* it, Handle<Object> value, StrictMode strict_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetDataProperty(
- LookupIterator* it, Handle<Object> value);
+ static Handle<Object> SetDataProperty(LookupIterator* it,
+ Handle<Object> value);
MUST_USE_RESULT static MaybeHandle<Object> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
StrictMode strict_mode, StoreFromKeyed store_mode);
@@ -1751,9 +1374,9 @@ class HeapObject: public Object {
// Returns the heap object's size in bytes
inline int Size();
- // Returns true if this heap object may contain pointers to objects in new
- // space.
- inline bool MayContainNewSpacePointers();
+ // Returns true if this heap object may contain raw values, i.e., values that
+ // look like pointers to heap objects.
+ inline bool MayContainRawValues();
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
@@ -1947,13 +1570,6 @@ class JSReceiver: public HeapObject {
FORCE_DELETION
};
- // Internal properties (e.g. the hidden properties dictionary) might
- // be added even though the receiver is non-extensible.
- enum ExtensibilityCheck {
- PERFORM_EXTENSIBILITY_CHECK,
- OMIT_EXTENSIBILITY_CHECK
- };
-
DECLARE_CAST(JSReceiver)
MUST_USE_RESULT static MaybeHandle<Object> SetElement(
@@ -2017,12 +1633,6 @@ class JSReceiver: public HeapObject {
inline static Handle<Smi> GetOrCreateIdentityHash(
Handle<JSReceiver> object);
- // Lookup a property. If found, the result is valid and has
- // detailed information.
- void LookupOwn(Handle<Name> name, LookupResult* result,
- bool search_hidden_prototypes = false);
- void Lookup(Handle<Name> name, LookupResult* result);
-
enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
// Computes the enumerable keys for a JSObject. Used for implementing
@@ -2158,8 +1768,6 @@ class JSObject: public JSReceiver {
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
static void AddProperty(Handle<JSObject> object, Handle<Name> key,
@@ -2177,18 +1785,6 @@ class JSObject: public JSReceiver {
// or returns false if such a map is not yet available.
static bool TryMigrateInstance(Handle<JSObject> instance);
- // Retrieve a value in a normalized object given a lookup result.
- // Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(const LookupResult* result);
- static Handle<Object> GetNormalizedProperty(Handle<JSObject> object,
- const LookupResult* result);
-
- // Sets the property value in a normalized object given a lookup result.
- // Handles the special representation of JS global objects.
- static void SetNormalizedProperty(Handle<JSObject> object,
- const LookupResult* result,
- Handle<Object> value);
-
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
static void SetNormalizedProperty(Handle<JSObject> object,
@@ -2403,12 +1999,6 @@ class JSObject: public JSReceiver {
inline void SetInternalField(int index, Object* value);
inline void SetInternalField(int index, Smi* value);
- // The following lookup functions skip interceptors.
- void LookupOwnRealNamedProperty(Handle<Name> name, LookupResult* result);
- void LookupRealNamedProperty(Handle<Name> name, LookupResult* result);
- void LookupRealNamedPropertyInPrototypes(Handle<Name> name,
- LookupResult* result);
-
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
int NumberOfOwnProperties(PropertyAttributes filter = NONE);
@@ -2514,6 +2104,7 @@ class JSObject: public JSReceiver {
static Handle<Object> GetDataProperty(Handle<JSObject> object,
Handle<Name> key);
+ static Handle<Object> GetDataProperty(LookupIterator* it);
DECLARE_CAST(JSObject)
@@ -2632,17 +2223,6 @@ class JSObject: public JSReceiver {
Handle<Map> new_map,
int expected_additional_properties);
- static void SetPropertyToField(LookupResult* lookup, Handle<Object> value);
-
- static void ConvertAndSetOwnProperty(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes);
-
- static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes);
static void GeneralizeFieldRepresentation(Handle<JSObject> object,
int modify_index,
Representation new_representation,
@@ -2716,29 +2296,9 @@ class JSObject: public JSReceiver {
StrictMode strict_mode,
bool check_prototype = true);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyUsingTransition(
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes);
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
LookupIterator* it, Handle<Object> value, StrictMode strict_mode);
- // Add a property to an object.
- MUST_USE_RESULT static MaybeHandle<Object> AddPropertyInternal(
- Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes, StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check, TransitionFlag flag);
-
- // Add a property to a fast-case object.
- static void AddFastProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode,
- TransitionFlag flag);
-
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -2749,12 +2309,8 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode);
- static Handle<Object> DeletePropertyPostInterceptor(Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode mode);
MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
- Handle<JSObject> object,
- Handle<Name> name);
+ Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name);
// Deletes the named property in a normalized object.
static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object,
@@ -2793,22 +2349,6 @@ class JSObject: public JSReceiver {
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes);
- static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object,
- Handle<Name> name);
- static void DefinePropertyAccessor(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes);
-
- // Try to define a single accessor paying attention to map transitions.
- // Returns false if this was not possible and we have to use the slow case.
- static bool DefineFastAccessor(Handle<JSObject> object,
- Handle<Name> name,
- AccessorComponent component,
- Handle<Object> accessor,
- PropertyAttributes attributes);
-
// Return the hash table backing store or the inline stored identity hash,
// whatever is found.
@@ -3580,8 +3120,6 @@ class DescriptorArray: public FixedArray {
Descriptor* desc,
const WhitenessWitness&);
- inline void Append(Descriptor* desc, const WhitenessWitness&);
-
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
@@ -3792,7 +3330,7 @@ class HashTable: public FixedArray {
// Returns probe entry.
static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
- DCHECK(IsPowerOf2(size));
+ DCHECK(base::bits::IsPowerOfTwo32(size));
return (hash + GetProbeOffset(number)) & (size - 1);
}
@@ -3898,7 +3436,8 @@ class StringTable: public HashTable<StringTable,
DECLARE_CAST(StringTable)
private:
- template <bool seq_ascii> friend class JsonParser;
+ template <bool seq_one_byte>
+ friend class JsonParser;
DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
};
@@ -4604,6 +4143,12 @@ class ScopeInfo : public FixedArray {
// Return if contexts are allocated for this scope.
bool HasContext();
+ // Return if this is a function scope with "use asm".
+ bool IsAsmModule() { return AsmModuleField::decode(Flags()); }
+
+ // Return if this is a nested function within an asm module scope.
+ bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+
// Return the function_name if present.
String* FunctionName();
@@ -4758,6 +4303,8 @@ class ScopeInfo : public FixedArray {
class StrictModeField: public BitField<StrictMode, 4, 1> {};
class FunctionVariableField: public BitField<FunctionVariableInfo, 5, 2> {};
class FunctionVariableMode: public BitField<VariableMode, 7, 3> {};
+ class AsmModuleField : public BitField<bool, 10, 1> {};
+ class AsmFunctionField : public BitField<bool, 11, 1> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -5247,16 +4794,14 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
class DeoptimizationInputData: public FixedArray {
public:
// Layout description. Indices in the array.
- static const int kDeoptEntryCountIndex = 0;
- static const int kReturnAddressPatchEntryCountIndex = 1;
- static const int kTranslationByteArrayIndex = 2;
- static const int kInlinedFunctionCountIndex = 3;
- static const int kLiteralArrayIndex = 4;
- static const int kOsrAstIdIndex = 5;
- static const int kOsrPcOffsetIndex = 6;
- static const int kOptimizationIdIndex = 7;
- static const int kSharedFunctionInfoIndex = 8;
- static const int kFirstDeoptEntryIndex = 9;
+ static const int kTranslationByteArrayIndex = 0;
+ static const int kInlinedFunctionCountIndex = 1;
+ static const int kLiteralArrayIndex = 2;
+ static const int kOsrAstIdIndex = 3;
+ static const int kOsrPcOffsetIndex = 4;
+ static const int kOptimizationIdIndex = 5;
+ static const int kSharedFunctionInfoIndex = 6;
+ static const int kFirstDeoptEntryIndex = 7;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -5265,12 +4810,6 @@ class DeoptimizationInputData: public FixedArray {
static const int kPcOffset = 3;
static const int kDeoptEntrySize = 4;
- // Offsets of return address patch entry elements relative to the start of the
- // entry
- static const int kReturnAddressPcOffset = 0;
- static const int kPatchedAddressPcOffset = 1;
- static const int kReturnAddressPatchEntrySize = 2;
-
// Simple element accessors.
#define DEFINE_ELEMENT_ACCESSORS(name, type) \
type* name() { \
@@ -5291,7 +4830,7 @@ class DeoptimizationInputData: public FixedArray {
#undef DEFINE_ELEMENT_ACCESSORS
// Accessors for elements of the ith deoptimization entry.
-#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
+#define DEFINE_ENTRY_ACCESSORS(name, type) \
type* name(int i) { \
return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
} \
@@ -5299,28 +4838,13 @@ class DeoptimizationInputData: public FixedArray {
set(IndexForEntry(i) + k##name##Offset, value); \
}
- DEFINE_DEOPT_ENTRY_ACCESSORS(AstIdRaw, Smi)
- DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
- DEFINE_DEOPT_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
- DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
+ DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
+ DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
+ DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+ DEFINE_ENTRY_ACCESSORS(Pc, Smi)
#undef DEFINE_DEOPT_ENTRY_ACCESSORS
-// Accessors for elements of the ith deoptimization entry.
-#define DEFINE_PATCH_ENTRY_ACCESSORS(name, type) \
- type* name(int i) { \
- return type::cast( \
- get(IndexForReturnAddressPatchEntry(i) + k##name##Offset)); \
- } \
- void Set##name(int i, type* value) { \
- set(IndexForReturnAddressPatchEntry(i) + k##name##Offset, value); \
- }
-
- DEFINE_PATCH_ENTRY_ACCESSORS(ReturnAddressPc, Smi)
- DEFINE_PATCH_ENTRY_ACCESSORS(PatchedAddressPc, Smi)
-
-#undef DEFINE_PATCH_ENTRY_ACCESSORS
-
BailoutId AstId(int i) {
return BailoutId(AstIdRaw(i)->value());
}
@@ -5330,19 +4854,12 @@ class DeoptimizationInputData: public FixedArray {
}
int DeoptCount() {
- return length() == 0 ? 0 : Smi::cast(get(kDeoptEntryCountIndex))->value();
- }
-
- int ReturnAddressPatchCount() {
- return length() == 0
- ? 0
- : Smi::cast(get(kReturnAddressPatchEntryCountIndex))->value();
+ return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
}
// Allocates a DeoptimizationInputData.
static Handle<DeoptimizationInputData> New(Isolate* isolate,
int deopt_entry_count,
- int return_address_patch_count,
PretenureFlag pretenure);
DECLARE_CAST(DeoptimizationInputData)
@@ -5352,21 +4869,12 @@ class DeoptimizationInputData: public FixedArray {
#endif
private:
- friend class Object; // For accessing LengthFor.
-
static int IndexForEntry(int i) {
return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
}
- int IndexForReturnAddressPatchEntry(int i) {
- return kFirstDeoptEntryIndex + (DeoptCount() * kDeoptEntrySize) +
- (i * kReturnAddressPatchEntrySize);
- }
- static int LengthFor(int deopt_count, int return_address_patch_count) {
- return kFirstDeoptEntryIndex + (deopt_count * kDeoptEntrySize) +
- (return_address_patch_count * kReturnAddressPatchEntrySize);
- }
+ static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
};
@@ -6020,10 +5528,11 @@ class DependentCode: public FixedArray {
kAllocationSiteTenuringChangedGroup,
// Group of code that depends on element transition information in
// AllocationSites not being changed.
- kAllocationSiteTransitionChangedGroup,
- kGroupCount = kAllocationSiteTransitionChangedGroup + 1
+ kAllocationSiteTransitionChangedGroup
};
+ static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
+
// Array for holding the index of the first code object of each group.
// The last element stores the total number of code objects.
class GroupStartIndexes {
@@ -6070,6 +5579,9 @@ class DependentCode: public FixedArray {
static DependentCode* ForObject(Handle<HeapObject> object,
DependencyGroup group);
+ static const char* DependencyGroupName(DependencyGroup group);
+ static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
+
private:
// Make a room at the end of the given group by moving out the first
// code objects of the subsequent groups.
@@ -6475,10 +5987,10 @@ class Map: public HeapObject {
// is found by re-transitioning from the root of the transition tree using the
// descriptor array of the map. Returns NULL if no updated map is found.
// This method also applies any pending migrations along the prototype chain.
- static MaybeHandle<Map> TryUpdate(Handle<Map> map) V8_WARN_UNUSED_RESULT;
+ static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
// Same as above, but does not touch the prototype chain.
static MaybeHandle<Map> TryUpdateInternal(Handle<Map> map)
- V8_WARN_UNUSED_RESULT;
+ WARN_UNUSED_RESULT;
// Returns a non-deprecated version of the input. This method may deprecate
// existing maps along the way if encodings conflict. Not for use while
@@ -6528,14 +6040,18 @@ class Map: public HeapObject {
Handle<Object> value,
PropertyAttributes attributes,
StoreFromKeyed store_mode);
+ static Handle<Map> TransitionToAccessorProperty(
+ Handle<Map> map, Handle<Name> name, AccessorComponent component,
+ Handle<Object> accessor, PropertyAttributes attributes);
+ static Handle<Map> ReconfigureDataProperty(Handle<Map> map, int descriptor,
+ PropertyAttributes attributes);
inline void AppendDescriptor(Descriptor* desc);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
static Handle<Map> Copy(Handle<Map> map);
- static Handle<Map> Create(Handle<JSFunction> constructor,
- int extra_inobject_properties);
+ static Handle<Map> Create(Isolate* isolate, int inobject_properties);
// Returns the next free property index (only valid for FAST MODE).
int NextFreePropertyIndex();
@@ -7141,9 +6657,8 @@ class SharedFunctionInfo: public HeapObject {
// [feedback_vector] - accumulates ast node feedback from full-codegen and
// (increasingly) from crankshafted code where sufficient feedback isn't
- // available. Currently the field is duplicated in
- // TypeFeedbackInfo::feedback_vector, but the allocation is done here.
- DECL_ACCESSORS(feedback_vector, FixedArray)
+ // available.
+ DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
// [instance class name]: class name for instances.
DECL_ACCESSORS(instance_class_name, Object)
@@ -7288,6 +6803,15 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that this function is an arrow function.
DECL_BOOLEAN_ACCESSORS(is_arrow)
+ // Indicates that this function is a concise method.
+ DECL_BOOLEAN_ACCESSORS(is_concise_method)
+
+ // Indicates that this function is an asm function.
+ DECL_BOOLEAN_ACCESSORS(asm_function)
+
+ inline FunctionKind kind();
+ inline void set_kind(FunctionKind kind);
+
// Indicates whether or not the code in the shared function support
// deoptimization.
inline bool has_deoptimization_support();
@@ -7482,17 +7006,21 @@ class SharedFunctionInfo: public HeapObject {
kIsFunction,
kDontCache,
kDontFlush,
- kIsGenerator,
kIsArrow,
+ kIsGenerator,
+ kIsConciseMethod,
+ kIsAsmFunction,
kCompilerHintsCount // Pseudo entry
};
- class DeoptCountBits: public BitField<int, 0, 4> {};
- class OptReenableTriesBits: public BitField<int, 4, 18> {};
- class ICAgeBits: public BitField<int, 22, 8> {};
+ class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 3> {};
- class OptCountBits: public BitField<int, 0, 22> {};
- class DisabledOptimizationReasonBits: public BitField<int, 22, 8> {};
+ class DeoptCountBits : public BitField<int, 0, 4> {};
+ class OptReenableTriesBits : public BitField<int, 4, 18> {};
+ class ICAgeBits : public BitField<int, 22, 8> {};
+
+ class OptCountBits : public BitField<int, 0, 22> {};
+ class DisabledOptimizationReasonBits : public BitField<int, 22, 8> {};
private:
#if V8_HOST_ARCH_32_BIT
@@ -7791,6 +7319,11 @@ class JSFunction: public JSObject {
static void SetInstancePrototype(Handle<JSFunction> function,
Handle<Object> value);
+ // Creates a new closure for the function with the same bindings,
+ // bound values, and prototype. An equivalent of spec operations
+ // ``CloneMethod`` and ``CloneBoundFunction``.
+ static Handle<JSFunction> CloneClosure(Handle<JSFunction> function);
+
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
bool RemovePrototype();
@@ -7919,9 +7452,6 @@ class GlobalObject: public JSObject {
// [global proxy]: the global proxy object of the context
DECL_ACCESSORS(global_proxy, JSObject)
- // Retrieve the property cell used to store a property.
- PropertyCell* GetPropertyCell(LookupResult* result);
-
DECLARE_CAST(GlobalObject)
// Layout description.
@@ -8173,7 +7703,7 @@ class JSMessageObject: public JSObject {
// If it is an atom regexp
// - a reference to a literal string to search for
// If it is an irregexp regexp:
-// - a reference to code for ASCII inputs (bytecode or compiled), or a smi
+// - a reference to code for Latin1 inputs (bytecode or compiled), or a smi
// used for tracking the last usage (used for code flushing).
// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
// used for tracking the last usage (used for code flushing)..
@@ -8187,7 +7717,13 @@ class JSRegExp: public JSObject {
// IRREGEXP: Compiled with Irregexp.
// IRREGEXP_NATIVE: Compiled to native code with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
- enum Flag { NONE = 0, GLOBAL = 1, IGNORE_CASE = 2, MULTILINE = 4 };
+ enum Flag {
+ NONE = 0,
+ GLOBAL = 1,
+ IGNORE_CASE = 2,
+ MULTILINE = 4,
+ STICKY = 8
+ };
class Flags {
public:
@@ -8195,6 +7731,7 @@ class JSRegExp: public JSObject {
bool is_global() { return (value_ & GLOBAL) != 0; }
bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
bool is_multiline() { return (value_ & MULTILINE) != 0; }
+ bool is_sticky() { return (value_ & STICKY) != 0; }
uint32_t value() { return value_; }
private:
uint32_t value_;
@@ -8210,17 +7747,17 @@ class JSRegExp: public JSObject {
// Set implementation data after the object has been prepared.
inline void SetDataAt(int index, Object* value);
- static int code_index(bool is_ascii) {
- if (is_ascii) {
- return kIrregexpASCIICodeIndex;
+ static int code_index(bool is_latin1) {
+ if (is_latin1) {
+ return kIrregexpLatin1CodeIndex;
} else {
return kIrregexpUC16CodeIndex;
}
}
- static int saved_code_index(bool is_ascii) {
- if (is_ascii) {
- return kIrregexpASCIICodeSavedIndex;
+ static int saved_code_index(bool is_latin1) {
+ if (is_latin1) {
+ return kIrregexpLatin1CodeSavedIndex;
} else {
return kIrregexpUC16CodeSavedIndex;
}
@@ -8246,23 +7783,23 @@ class JSRegExp: public JSObject {
static const int kAtomDataSize = kAtomPatternIndex + 1;
- // Irregexp compiled code or bytecode for ASCII. If compilation
+ // Irregexp compiled code or bytecode for Latin1. If compilation
// fails, this fields hold an exception object that should be
// thrown if the regexp is used again.
- static const int kIrregexpASCIICodeIndex = kDataIndex;
+ static const int kIrregexpLatin1CodeIndex = kDataIndex;
// Irregexp compiled code or bytecode for UC16. If compilation
// fails, this fields hold an exception object that should be
// thrown if the regexp is used again.
static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
- // Saved instance of Irregexp compiled code or bytecode for ASCII that
+ // Saved instance of Irregexp compiled code or bytecode for Latin1 that
// is a potential candidate for flushing.
- static const int kIrregexpASCIICodeSavedIndex = kDataIndex + 2;
+ static const int kIrregexpLatin1CodeSavedIndex = kDataIndex + 2;
// Saved instance of Irregexp compiled code or bytecode for UC16 that is
// a potential candidate for flushing.
static const int kIrregexpUC16CodeSavedIndex = kDataIndex + 3;
- // Maximal number of registers used by either ASCII or UC16.
+ // Maximal number of registers used by either Latin1 or UC16.
// Only used to check that there is enough stack space
static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
// Number of captures in the compiled regexp.
@@ -8273,8 +7810,8 @@ class JSRegExp: public JSObject {
// Offsets directly into the data fixed array.
static const int kDataTagOffset =
FixedArray::kHeaderSize + kTagIndex * kPointerSize;
- static const int kDataAsciiCodeOffset =
- FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+ static const int kDataOneByteCodeOffset =
+ FixedArray::kHeaderSize + kIrregexpLatin1CodeIndex * kPointerSize;
static const int kDataUC16CodeOffset =
FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
@@ -8525,22 +8062,6 @@ class TypeFeedbackInfo: public Struct {
static const int kStorage3Offset = kStorage2Offset + kPointerSize;
static const int kSize = kStorage3Offset + kPointerSize;
- // TODO(mvstanton): move these sentinel declarations to shared function info.
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
private:
static const int kTypeChangeChecksumBits = 7;
@@ -8905,9 +8426,9 @@ class StringShape BASE_EMBEDDED {
inline bool IsCons();
inline bool IsSliced();
inline bool IsIndirect();
- inline bool IsExternalAscii();
+ inline bool IsExternalOneByte();
inline bool IsExternalTwoByte();
- inline bool IsSequentialAscii();
+ inline bool IsSequentialOneByte();
inline bool IsSequentialTwoByte();
inline bool IsInternalized();
inline StringRepresentationTag representation_tag();
@@ -9102,21 +8623,21 @@ class String: public Name {
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
- // ASCII chars or two-byte UC16.
+ // one-byte chars or two-byte UC16.
// Returned by String::GetFlatContent().
class FlatContent {
public:
// Returns true if the string is flat and this structure contains content.
bool IsFlat() { return state_ != NON_FLAT; }
- // Returns true if the structure contains ASCII content.
- bool IsAscii() { return state_ == ASCII; }
+ // Returns true if the structure contains one-byte content.
+ bool IsOneByte() { return state_ == ONE_BYTE; }
// Returns true if the structure contains two-byte content.
bool IsTwoByte() { return state_ == TWO_BYTE; }
- // Return the one byte content of the string. Only use if IsAscii() returns
- // true.
+ // Return the one byte content of the string. Only use if IsOneByte()
+ // returns true.
Vector<const uint8_t> ToOneByteVector() {
- DCHECK_EQ(ASCII, state_);
+ DCHECK_EQ(ONE_BYTE, state_);
return Vector<const uint8_t>(onebyte_start, length_);
}
// Return the two-byte content of the string. Only use if IsTwoByte()
@@ -9129,16 +8650,16 @@ class String: public Name {
uc16 Get(int i) {
DCHECK(i < length_);
DCHECK(state_ != NON_FLAT);
- if (state_ == ASCII) return onebyte_start[i];
+ if (state_ == ONE_BYTE) return onebyte_start[i];
return twobyte_start[i];
}
private:
- enum State { NON_FLAT, ASCII, TWO_BYTE };
+ enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
explicit FlatContent(const uint8_t* start, int length)
- : onebyte_start(start), length_(length), state_(ASCII) { }
+ : onebyte_start(start), length_(length), state_(ONE_BYTE) {}
explicit FlatContent(const uc16* start, int length)
: twobyte_start(start), length_(length), state_(TWO_BYTE) { }
FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) { }
@@ -9162,10 +8683,10 @@ class String: public Name {
inline int synchronized_length() const;
inline void synchronized_set_length(int value);
- // Returns whether this string has only ASCII chars, i.e. all of them can
- // be ASCII encoded. This might be the case even if the string is
+ // Returns whether this string has only one-byte chars, i.e. all of them can
+ // be one-byte encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
- // two-byte external representations even for ASCII data.
+ // two-byte external representations even for one-byte data.
inline bool IsOneByteRepresentation() const;
inline bool IsTwoByteRepresentation() const;
@@ -9213,7 +8734,7 @@ class String: public Name {
inline String* GetUnderlying();
// Mark the string as an undetectable object. It only applies to
- // ASCII and two byte string types.
+ // one-byte and two-byte string types.
bool MarkAsUndetectable();
// String equality operations.
@@ -9254,7 +8775,7 @@ class String: public Name {
// Externalization.
bool MakeExternal(v8::String::ExternalStringResource* resource);
- bool MakeExternal(v8::String::ExternalAsciiStringResource* resource);
+ bool MakeExternal(v8::String::ExternalOneByteStringResource* resource);
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
@@ -9315,28 +8836,40 @@ class String: public Name {
int from,
int to);
- // The return value may point to the first aligned word containing the
- // first non-ascii character, rather than directly to the non-ascii character.
- // If the return value is >= the passed length, the entire string was ASCII.
+ // The return value may point to the first aligned word containing the first
+ // non-one-byte character, rather than directly to the non-one-byte character.
+ // If the return value is >= the passed length, the entire string was
+ // one-byte.
static inline int NonAsciiStart(const char* chars, int length) {
const char* start = chars;
const char* limit = chars + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
- const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars + sizeof(uintptr_t) <= limit) {
- if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
- return static_cast<int>(chars - start);
+
+ if (length >= kIntptrSize) {
+ // Check unaligned bytes.
+ while (!IsAligned(reinterpret_cast<intptr_t>(chars), sizeof(uintptr_t))) {
+ if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ return static_cast<int>(chars - start);
+ }
+ ++chars;
+ }
+ // Check aligned words.
+ DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+ while (chars + sizeof(uintptr_t) <= limit) {
+ if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
+ return static_cast<int>(chars - start);
+ }
+ chars += sizeof(uintptr_t);
}
- chars += sizeof(uintptr_t);
}
-#endif
+ // Check remaining unaligned bytes.
while (chars < limit) {
if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
return static_cast<int>(chars - start);
}
++chars;
}
+
return static_cast<int>(chars - start);
}
@@ -9417,11 +8950,11 @@ class SeqString: public String {
};
-// The AsciiString class captures sequential ASCII string objects.
-// Each character in the AsciiString is an ASCII character.
+// The OneByteString class captures sequential one-byte string objects.
+// Each character in the OneByteString is a one-byte character.
class SeqOneByteString: public SeqString {
public:
- static const bool kHasAsciiEncoding = true;
+ static const bool kHasOneByteEncoding = true;
// Dispatched behavior.
inline uint16_t SeqOneByteStringGet(int index);
@@ -9435,16 +8968,16 @@ class SeqOneByteString: public SeqString {
DECLARE_CAST(SeqOneByteString)
// Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of an AsciiString
+ // garbage collector to compute the actual size of a OneByteString
// instance.
inline int SeqOneByteStringSize(InstanceType instance_type);
- // Computes the size for an AsciiString instance of a given length.
+ // Computes the size for a OneByteString instance of a given length.
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
- // Maximal memory usage for a single sequential ASCII string.
+ // Maximal memory usage for a single sequential one-byte string.
static const int kMaxSize = 512 * MB - 1;
STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
@@ -9457,7 +8990,7 @@ class SeqOneByteString: public SeqString {
// Each character in the TwoByteString is a two-byte uint16_t.
class SeqTwoByteString: public SeqString {
public:
- static const bool kHasAsciiEncoding = false;
+ static const bool kHasOneByteEncoding = false;
// Dispatched behavior.
inline uint16_t SeqTwoByteStringGet(int index);
@@ -9618,13 +9151,13 @@ class ExternalString: public String {
};
-// The ExternalAsciiString class is an external string backed by an
-// ASCII string.
-class ExternalAsciiString: public ExternalString {
+// The ExternalOneByteString class is an external string backed by a
+// one-byte string.
+class ExternalOneByteString : public ExternalString {
public:
- static const bool kHasAsciiEncoding = true;
+ static const bool kHasOneByteEncoding = true;
- typedef v8::String::ExternalAsciiStringResource Resource;
+ typedef v8::String::ExternalOneByteStringResource Resource;
// The underlying resource.
inline const Resource* resource();
@@ -9639,18 +9172,18 @@ class ExternalAsciiString: public ExternalString {
inline const uint8_t* GetChars();
// Dispatched behavior.
- inline uint16_t ExternalAsciiStringGet(int index);
+ inline uint16_t ExternalOneByteStringGet(int index);
- DECLARE_CAST(ExternalAsciiString)
+ DECLARE_CAST(ExternalOneByteString)
// Garbage collection support.
- inline void ExternalAsciiStringIterateBody(ObjectVisitor* v);
+ inline void ExternalOneByteStringIterateBody(ObjectVisitor* v);
- template<typename StaticVisitor>
- inline void ExternalAsciiStringIterateBody();
+ template <typename StaticVisitor>
+ inline void ExternalOneByteStringIterateBody();
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
};
@@ -9658,7 +9191,7 @@ class ExternalAsciiString: public ExternalString {
// encoded string.
class ExternalTwoByteString: public ExternalString {
public:
- static const bool kHasAsciiEncoding = false;
+ static const bool kHasOneByteEncoding = false;
typedef v8::String::ExternalStringResource Resource;
@@ -9729,7 +9262,7 @@ class FlatStringReader : public Relocatable {
int length() { return length_; }
private:
String** str_;
- bool is_ascii_;
+ bool is_one_byte_;
int length_;
const void* start_;
};
@@ -10175,10 +9708,10 @@ class OrderedHashTableIterator: public JSObject {
DECL_ACCESSORS(table, Object)
// [index]: The index into the data table.
- DECL_ACCESSORS(index, Smi)
+ DECL_ACCESSORS(index, Object)
// [kind]: The kind of iteration this is. One of the [Kind] enum values.
- DECL_ACCESSORS(kind, Smi)
+ DECL_ACCESSORS(kind, Object)
#ifdef OBJECT_PRINT
void OrderedHashTableIteratorPrint(OStream& os); // NOLINT
@@ -11210,9 +10743,9 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
- // Visits the resource of an ASCII or two-byte string.
- virtual void VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource) {}
+ // Visits the resource of a one-byte or two-byte string.
+ virtual void VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource) {}
virtual void VisitExternalTwoByteString(
v8::String::ExternalStringResource** resource) {}
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 0074adbefe..387e9c055c 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -226,7 +226,7 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
if (info->is_osr()) {
if (FLAG_trace_osr) {
PrintF("[COSR - ");
- info->closure()->PrintName();
+ function->ShortPrint();
PrintF(" is ready for install and entry at AST id %d]\n",
info->osr_ast_id().ToInt());
}
@@ -237,6 +237,11 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
BackEdgeTable::RemoveStackCheck(code, offset);
} else {
if (function->IsOptimized()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Aborting compilation for ");
+ function->ShortPrint();
+ PrintF(" as it has already been optimized.\n");
+ }
DisposeOptimizedCompileJob(job, false);
} else {
Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 62304eb908..e927e6bbfd 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ostreams.h"
+
#include <algorithm>
-#include <cctype>
#include <cmath>
#include "src/base/platform/platform.h" // For isinf/isnan with MSVC
-#include "src/ostreams.h"
#if V8_OS_WIN
#define snprintf sprintf_s
@@ -164,22 +164,26 @@ OFStream& OFStream::flush() {
}
-OStream& operator<<(OStream& os, const AsReversiblyEscapedUC16& c) {
+// Locale-independent predicates.
+static bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7e; }
+static bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xd) || c == 0x20; }
+static bool IsOK(uint16_t c) { return (IsPrint(c) || IsSpace(c)) && c != '\\'; }
+
+
+static OStream& PrintUC16(OStream& os, uint16_t c, bool (*pred)(uint16_t)) {
char buf[10];
- const char* format =
- (std::isprint(c.value) || std::isspace(c.value)) && c.value != '\\'
- ? "%c"
- : (c.value <= 0xff) ? "\\x%02x" : "\\u%04x";
- snprintf(buf, sizeof(buf), format, c.value);
+ const char* format = pred(c) ? "%c" : (c <= 0xff) ? "\\x%02x" : "\\u%04x";
+ snprintf(buf, sizeof(buf), format, c);
return os << buf;
}
+OStream& operator<<(OStream& os, const AsReversiblyEscapedUC16& c) {
+ return PrintUC16(os, c.value, IsOK);
+}
+
+
OStream& operator<<(OStream& os, const AsUC16& c) {
- char buf[10];
- const char* format =
- std::isprint(c.value) ? "%c" : (c.value <= 0xff) ? "\\x%02x" : "\\u%04x";
- snprintf(buf, sizeof(buf), format, c.value);
- return os << buf;
+ return PrintUC16(os, c.value, IsPrint);
}
} } // namespace v8::internal
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 08f53c52ac..508a88d886 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -83,8 +83,8 @@ class OStringStream: public OStream {
// Internally, our character data is always 0-terminated.
const char* c_str() const { return data(); }
- virtual OStringStream& write(const char* s, size_t n) V8_OVERRIDE;
- virtual OStringStream& flush() V8_OVERRIDE;
+ virtual OStringStream& write(const char* s, size_t n) OVERRIDE;
+ virtual OStringStream& flush() OVERRIDE;
private:
// Primitive allocator interface, can be extracted if needed.
@@ -107,8 +107,8 @@ class OFStream: public OStream {
explicit OFStream(FILE* f) : f_(f) { }
virtual ~OFStream() { }
- virtual OFStream& write(const char* s, size_t n) V8_OVERRIDE;
- virtual OFStream& flush() V8_OVERRIDE;
+ virtual OFStream& write(const char* s, size_t n) OVERRIDE;
+ virtual OFStream& flush() OVERRIDE;
private:
FILE* const f_;
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 6c941daa97..9d1a40d39b 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/ast.h"
+#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/char-predicates-inl.h"
@@ -14,7 +15,7 @@
#include "src/messages.h"
#include "src/parser.h"
#include "src/preparser.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/scanner-character-streams.h"
#include "src/scopeinfo.h"
#include "src/string-stream.h"
@@ -266,9 +267,9 @@ void Parser::SetCachedData() {
Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) {
- DCHECK(ast_value_factory_);
+ DCHECK(ast_value_factory());
Scope* result =
- new (zone()) Scope(parent, scope_type, ast_value_factory_, zone());
+ new (zone()) Scope(parent, scope_type, ast_value_factory(), zone());
result->Initialize();
return result;
}
@@ -341,9 +342,38 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// Implementation of Parser
+class ParserTraits::Checkpoint
+ : public ParserBase<ParserTraits>::CheckpointBase {
+ public:
+ explicit Checkpoint(ParserBase<ParserTraits>* parser)
+ : CheckpointBase(parser), parser_(parser) {
+ saved_ast_node_id_gen_ = *parser_->ast_node_id_gen_;
+ }
+
+ void Restore() {
+ CheckpointBase::Restore();
+ *parser_->ast_node_id_gen_ = saved_ast_node_id_gen_;
+ }
+
+ private:
+ ParserBase<ParserTraits>* parser_;
+ AstNode::IdGen saved_ast_node_id_gen_;
+};
+
+
bool ParserTraits::IsEvalOrArguments(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory_->eval_string() ||
- identifier == parser_->ast_value_factory_->arguments_string();
+ return identifier == parser_->ast_value_factory()->eval_string() ||
+ identifier == parser_->ast_value_factory()->arguments_string();
+}
+
+
+bool ParserTraits::IsPrototype(const AstRawString* identifier) const {
+ return identifier == parser_->ast_value_factory()->prototype_string();
+}
+
+
+bool ParserTraits::IsConstructor(const AstRawString* identifier) const {
+ return identifier == parser_->ast_value_factory()->constructor_string();
}
@@ -368,7 +398,7 @@ void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
fni->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
} else {
fni->PushLiteralName(
- parser_->ast_value_factory_->anonymous_function_string());
+ parser_->ast_value_factory()->anonymous_function_string());
}
}
@@ -387,7 +417,7 @@ void ParserTraits::CheckPossibleEvalCall(Expression* expression,
Scope* scope) {
VariableProxy* callee = expression->AsVariableProxy();
if (callee != NULL &&
- callee->raw_name() == parser_->ast_value_factory_->eval_string()) {
+ callee->raw_name() == parser_->ast_value_factory()->eval_string()) {
scope->DeclarationScope()->RecordEvalCall();
}
}
@@ -507,21 +537,21 @@ Expression* ParserTraits::BuildUnaryExpression(
Expression* ParserTraits::NewThrowReferenceError(const char* message, int pos) {
return NewThrowError(
- parser_->ast_value_factory_->make_reference_error_string(), message, NULL,
- pos);
+ parser_->ast_value_factory()->make_reference_error_string(), message,
+ NULL, pos);
}
Expression* ParserTraits::NewThrowSyntaxError(
const char* message, const AstRawString* arg, int pos) {
- return NewThrowError(parser_->ast_value_factory_->make_syntax_error_string(),
+ return NewThrowError(parser_->ast_value_factory()->make_syntax_error_string(),
message, arg, pos);
}
Expression* ParserTraits::NewThrowTypeError(
const char* message, const AstRawString* arg, int pos) {
- return NewThrowError(parser_->ast_value_factory_->make_type_error_string(),
+ return NewThrowError(parser_->ast_value_factory()->make_type_error_string(),
message, arg, pos);
}
@@ -532,7 +562,7 @@ Expression* ParserTraits::NewThrowError(
Zone* zone = parser_->zone();
int argc = arg != NULL ? 1 : 0;
const AstRawString* type =
- parser_->ast_value_factory_->GetOneByteString(message);
+ parser_->ast_value_factory()->GetOneByteString(message);
ZoneList<const AstRawString*>* array =
new (zone) ZoneList<const AstRawString*>(argc, zone);
if (arg != NULL) {
@@ -603,14 +633,23 @@ void ParserTraits::ReportMessageAt(Scanner::Location source_location,
const AstRawString* ParserTraits::GetSymbol(Scanner* scanner) {
const AstRawString* result =
- parser_->scanner()->CurrentSymbol(parser_->ast_value_factory_);
+ parser_->scanner()->CurrentSymbol(parser_->ast_value_factory());
DCHECK(result != NULL);
return result;
}
+const AstRawString* ParserTraits::GetNumberAsSymbol(Scanner* scanner) {
+ double double_value = parser_->scanner()->DoubleValue();
+ char array[100];
+ const char* string =
+ DoubleToCString(double_value, Vector<char>(array, arraysize(array)));
+ return ast_value_factory()->GetOneByteString(string);
+}
+
+
const AstRawString* ParserTraits::GetNextSymbol(Scanner* scanner) {
- return parser_->scanner()->NextSymbol(parser_->ast_value_factory_);
+ return parser_->scanner()->NextSymbol(parser_->ast_value_factory());
}
@@ -619,6 +658,19 @@ Expression* ParserTraits::ThisExpression(
return factory->NewVariableProxy(scope->receiver(), pos);
}
+Expression* ParserTraits::SuperReference(
+ Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory, int pos) {
+ return factory->NewSuperReference(
+ ThisExpression(scope, factory, pos)->AsVariableProxy(),
+ pos);
+}
+
+Expression* ParserTraits::ClassLiteral(
+ const AstRawString* name, Expression* extends, Expression* constructor,
+ ZoneList<ObjectLiteral::Property*>* properties, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewClassLiteral(name, extends, constructor, properties, pos);
+}
Literal* ParserTraits::ExpressionFromLiteral(
Token::Value token, int pos,
@@ -690,65 +742,67 @@ Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
FunctionLiteral* ParserTraits::ParseFunctionLiteral(
- const AstRawString* name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok) {
- return parser_->ParseFunctionLiteral(name, function_name_location,
- name_is_strict_reserved, is_generator,
- function_token_position, type,
- arity_restriction, ok);
+ const AstRawString* name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
+ return parser_->ParseFunctionLiteral(
+ name, function_name_location, name_is_strict_reserved, kind,
+ function_token_position, type, arity_restriction, ok);
}
-Parser::Parser(CompilationInfo* info)
- : ParserBase<ParserTraits>(&scanner_,
- info->isolate()->stack_guard()->real_climit(),
- info->extension(), NULL, info->zone(), this),
- isolate_(info->isolate()),
- script_(info->script()),
- scanner_(isolate_->unicode_cache()),
+Parser::Parser(CompilationInfo* info, ParseInfo* parse_info)
+ : ParserBase<ParserTraits>(&scanner_, parse_info->stack_limit,
+ info->extension(), NULL, info->zone(),
+ info->ast_node_id_gen(), this),
+ scanner_(parse_info->unicode_cache),
reusable_preparser_(NULL),
original_scope_(NULL),
target_stack_(NULL),
cached_parse_data_(NULL),
- ast_value_factory_(NULL),
info_(info),
has_pending_error_(false),
pending_error_message_(NULL),
pending_error_arg_(NULL),
- pending_error_char_arg_(NULL) {
- DCHECK(!script_.is_null());
- isolate_->set_ast_node_id(0);
+ pending_error_char_arg_(NULL),
+ total_preparse_skipped_(0),
+ pre_parse_timer_(NULL) {
+ DCHECK(!script().is_null() || info->source_stream() != NULL);
set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
set_allow_modules(!info->is_native() && FLAG_harmony_modules);
set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
set_allow_lazy(false); // Must be explicitly enabled.
- set_allow_generators(FLAG_harmony_generators);
set_allow_arrow_functions(FLAG_harmony_arrow_functions);
set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
+ set_allow_classes(FLAG_harmony_classes);
+ set_allow_harmony_object_literals(FLAG_harmony_object_literals);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
}
+ if (info->ast_value_factory() == NULL) {
+ // info takes ownership of AstValueFactory.
+ info->SetAstValueFactory(
+ new AstValueFactory(zone(), parse_info->hash_seed));
+ }
}
FunctionLiteral* Parser::ParseProgram() {
// TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
// see comment for HistogramTimerScope class.
+
+ // It's OK to use the counters here, since this function is only called in
+ // the main thread.
HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
- Handle<String> source(String::cast(script_->source()));
+ Handle<String> source(String::cast(script()->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
if (FLAG_trace_parse) {
timer.Start();
}
- fni_ = new(zone()) FuncNameInferrer(ast_value_factory_, zone());
+ fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
CompleteParserRecorder recorder;
@@ -761,6 +815,9 @@ FunctionLiteral* Parser::ParseProgram() {
source = String::Flatten(source);
FunctionLiteral* result;
+
+ Scope* top_scope = NULL;
+ Scope* eval_scope = NULL;
if (source->IsExternalTwoByteString()) {
// Notice that the stream is destroyed at the end of the branch block.
// The last line of the blocks can't be moved outside, even though they're
@@ -768,12 +825,17 @@ FunctionLiteral* Parser::ParseProgram() {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source);
+ result = DoParseProgram(info(), &top_scope, &eval_scope);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), source);
+ result = DoParseProgram(info(), &top_scope, &eval_scope);
}
+ top_scope->set_end_position(source->length());
+ if (eval_scope != NULL) {
+ eval_scope->set_end_position(source->length());
+ }
+ HandleSourceURLComments();
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
@@ -796,53 +858,52 @@ FunctionLiteral* Parser::ParseProgram() {
}
-FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
- Handle<String> source) {
+FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
+ Scope** eval_scope) {
DCHECK(scope_ == NULL);
DCHECK(target_stack_ == NULL);
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
- info->SetGlobalScope(scope);
+ {
+ *scope = NewScope(scope_, GLOBAL_SCOPE);
+ info->SetGlobalScope(*scope);
if (!info->context().is_null() && !info->context()->IsNativeContext()) {
- scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
+ *scope = Scope::DeserializeScopeChain(*info->context(), *scope, zone());
// The Scope is backed up by ScopeInfo (which is in the V8 heap); this
// means the Parser cannot operate independent of the V8 heap. Tell the
// string table to internalize strings and values right after they're
// created.
- ast_value_factory_->Internalize(isolate());
+ ast_value_factory()->Internalize(isolate());
}
- original_scope_ = scope;
+ original_scope_ = *scope;
if (info->is_eval()) {
- if (!scope->is_global_scope() || info->strict_mode() == STRICT) {
- scope = NewScope(scope, EVAL_SCOPE);
+ if (!(*scope)->is_global_scope() || info->strict_mode() == STRICT) {
+ *scope = NewScope(*scope, EVAL_SCOPE);
}
} else if (info->is_global()) {
- scope = NewScope(scope, GLOBAL_SCOPE);
+ *scope = NewScope(*scope, GLOBAL_SCOPE);
}
- scope->set_start_position(0);
- scope->set_end_position(source->length());
+ (*scope)->set_start_position(0);
+ // End position will be set by the caller.
// Compute the parsing mode.
Mode mode = (FLAG_lazy && allow_lazy()) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax() ||
- extension_ != NULL ||
- scope->is_eval_scope()) {
+ if (allow_natives_syntax() || extension_ != NULL ||
+ (*scope)->is_eval_scope()) {
mode = PARSE_EAGERLY;
}
ParsingModeScope parsing_mode(this, mode);
// Enters 'scope'.
- FunctionState function_state(&function_state_, &scope_, scope, zone(),
- ast_value_factory_);
+ FunctionState function_state(&function_state_, &scope_, *scope, zone(),
+ ast_value_factory(), info->ast_node_id_gen());
scope_->SetStrictMode(info->strict_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
- ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
-
- HandleSourceURLComments();
+ ParseSourceElements(body, Token::EOS, info->is_eval(), true, eval_scope,
+ &ok);
if (ok && strict_mode() == STRICT) {
CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
@@ -862,24 +923,18 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
}
}
- ast_value_factory_->Internalize(isolate());
if (ok) {
result = factory()->NewFunctionLiteral(
- ast_value_factory_->empty_string(), ast_value_factory_, scope_, body,
- function_state.materialized_literal_count(),
+ ast_value_factory()->empty_string(), ast_value_factory(), scope_,
+ body, function_state.materialized_literal_count(),
function_state.expected_property_count(),
function_state.handler_count(), 0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval,
- FunctionLiteral::kNotParenthesized, FunctionLiteral::kNormalFunction,
- 0);
+ FunctionLiteral::kNotParenthesized, FunctionKind::kNormalFunction, 0);
result->set_ast_properties(factory()->visitor()->ast_properties());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
- } else if (stack_overflow()) {
- isolate()->StackOverflow();
- } else {
- ThrowPendingError();
}
}
@@ -891,8 +946,10 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral* Parser::ParseLazy() {
+ // It's OK to use the counters here, since this function is only called in
+ // the main thread.
HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
- Handle<String> source(String::cast(script_->source()));
+ Handle<String> source(String::cast(script()->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
if (FLAG_trace_parse) {
@@ -932,9 +989,9 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
DCHECK(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
- DCHECK(ast_value_factory_);
- fni_ = new(zone()) FuncNameInferrer(ast_value_factory_, zone());
- const AstRawString* raw_name = ast_value_factory_->GetString(name);
+ DCHECK(ast_value_factory());
+ fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
+ const AstRawString* raw_name = ast_value_factory()->GetString(name);
fni_->PushEnclosingName(raw_name);
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
@@ -952,7 +1009,8 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
original_scope_ = scope;
FunctionState function_state(&function_state_, &scope_, scope, zone(),
- ast_value_factory_);
+ ast_value_factory(),
+ info()->ast_node_id_gen());
DCHECK(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
DCHECK(info()->strict_mode() == shared_info->strict_mode());
scope->SetStrictMode(shared_info->strict_mode());
@@ -961,18 +1019,16 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
? FunctionLiteral::ANONYMOUS_EXPRESSION
: FunctionLiteral::NAMED_EXPRESSION)
: FunctionLiteral::DECLARATION;
- bool is_generator = shared_info->is_generator();
bool ok = true;
if (shared_info->is_arrow()) {
- DCHECK(!is_generator);
Expression* expression = ParseExpression(false, &ok);
DCHECK(expression->IsFunctionLiteral());
result = expression->AsFunctionLiteral();
} else {
result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
false, // Strict mode name already checked.
- is_generator, RelocInfo::kNoPosition,
+ shared_info->kind(), RelocInfo::kNoPosition,
function_type,
FunctionLiteral::NORMAL_ARITY, &ok);
}
@@ -983,14 +1039,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
// Make sure the target stack is empty.
DCHECK(target_stack_ == NULL);
- ast_value_factory_->Internalize(isolate());
- if (result == NULL) {
- if (stack_overflow()) {
- isolate()->StackOverflow();
- } else {
- ThrowPendingError();
- }
- } else {
+ if (result != NULL) {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
}
@@ -999,10 +1048,8 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token,
- bool is_eval,
- bool is_global,
- bool* ok) {
+ int end_token, bool is_eval, bool is_global,
+ Scope** eval_scope, bool* ok) {
// SourceElements ::
// (ModuleElement)* <end_token>
@@ -1044,9 +1091,9 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// one can be present.
if (strict_mode() == SLOPPY &&
literal->raw_value()->AsString() ==
- ast_value_factory_->use_strict_string() &&
+ ast_value_factory()->use_strict_string() &&
token_loc.end_pos - token_loc.beg_pos ==
- ast_value_factory_->use_strict_string()->length() + 2) {
+ ast_value_factory()->use_strict_string()->length() + 2) {
// TODO(mstarzinger): Global strict eval calls, need their own scope
// as specified in ES5 10.4.2(3). The correct fix would be to always
// add this scope in DoParseProgram(), but that requires adaptations
@@ -1058,18 +1105,23 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
scope->set_start_position(scope_->start_position());
scope->set_end_position(scope_->end_position());
scope_ = scope;
+ if (eval_scope != NULL) {
+ // Caller will correct the positions of the ad hoc eval scope.
+ *eval_scope = scope;
+ }
mode_ = PARSE_EAGERLY;
}
scope_->SetStrictMode(STRICT);
// "use strict" is the only directive for now.
directive_prologue = false;
} else if (literal->raw_value()->AsString() ==
- ast_value_factory_->use_asm_string() &&
+ ast_value_factory()->use_asm_string() &&
token_loc.end_pos - token_loc.beg_pos ==
- ast_value_factory_->use_asm_string()->length() + 2) {
+ ast_value_factory()->use_asm_string()->length() + 2) {
// Store the usage count; The actual use counter on the isolate is
// incremented after parsing is done.
++use_counts_[v8::Isolate::kUseAsm];
+ scope_->SetAsmModule();
}
} else {
// End of the directive prologue.
@@ -1103,6 +1155,8 @@ Statement* Parser::ParseModuleElement(ZoneList<const AstRawString*>* labels,
switch (peek()) {
case Token::FUNCTION:
return ParseFunctionDeclaration(NULL, ok);
+ case Token::CLASS:
+ return ParseClassDeclaration(NULL, ok);
case Token::IMPORT:
return ParseImportDeclaration(ok);
case Token::EXPORT:
@@ -1125,7 +1179,7 @@ Statement* Parser::ParseModuleElement(ZoneList<const AstRawString*>* labels,
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL && estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->raw_name() ==
- ast_value_factory_->module_string() &&
+ ast_value_factory()->module_string() &&
!scanner()->literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
@@ -1423,7 +1477,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
const AstRawString* name =
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
- if (name != ast_value_factory_->module_string()) {
+ if (name != ast_value_factory()->module_string()) {
names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
@@ -1442,6 +1496,10 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
result = ParseFunctionDeclaration(&names, CHECK_OK);
break;
+ case Token::CLASS:
+ result = ParseClassDeclaration(&names, CHECK_OK);
+ break;
+
case Token::VAR:
case Token::LET:
case Token::CONST:
@@ -1504,10 +1562,13 @@ Statement* Parser::ParseBlockElement(ZoneList<const AstRawString*>* labels,
// LetDeclaration
// ConstDeclaration
// GeneratorDeclaration
+ // ClassDeclaration
switch (peek()) {
case Token::FUNCTION:
return ParseFunctionDeclaration(NULL, ok);
+ case Token::CLASS:
+ return ParseClassDeclaration(NULL, ok);
case Token::CONST:
return ParseVariableStatement(kModuleElement, NULL, ok);
case Token::LET:
@@ -1619,6 +1680,9 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
return ParseFunctionDeclaration(NULL, ok);
}
+ case Token::CLASS:
+ return ParseClassDeclaration(NULL, ok);
+
case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -1859,18 +1923,16 @@ Statement* Parser::ParseFunctionDeclaration(
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int pos = position();
- bool is_generator = allow_generators() && Check(Token::MUL);
+ bool is_generator = Check(Token::MUL);
bool is_strict_reserved = false;
const AstRawString* name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
- FunctionLiteral* fun = ParseFunctionLiteral(name,
- scanner()->location(),
- is_strict_reserved,
- is_generator,
- pos,
- FunctionLiteral::DECLARATION,
- FunctionLiteral::NORMAL_ARITY,
- CHECK_OK);
+ FunctionLiteral* fun =
+ ParseFunctionLiteral(name, scanner()->location(), is_strict_reserved,
+ is_generator ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction,
+ pos, FunctionLiteral::DECLARATION,
+ FunctionLiteral::NORMAL_ARITY, CHECK_OK);
// Even if we're not at the top-level of the global or a function
// scope, we treat it as such and introduce the function with its
// initial value upon entering the corresponding scope.
@@ -1889,6 +1951,47 @@ Statement* Parser::ParseFunctionDeclaration(
}
+Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+ bool* ok) {
+ // ClassDeclaration ::
+ // 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
+ //
+ // A ClassDeclaration
+ //
+ // class C { ... }
+ //
+ // has the same semantics as:
+ //
+ // let C = class C { ... };
+ //
+ // so rewrite it as such.
+
+ Expect(Token::CLASS, CHECK_OK);
+ int pos = position();
+ bool is_strict_reserved = false;
+ const AstRawString* name =
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+ Expression* value = ParseClassLiteral(name, scanner()->location(),
+ is_strict_reserved, pos, CHECK_OK);
+
+ Block* block = factory()->NewBlock(NULL, 1, true, pos);
+ VariableMode mode = LET;
+ VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+ Declare(declaration, true, CHECK_OK);
+
+ Token::Value init_op = Token::INIT_LET;
+ Assignment* assignment = factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
+
+ if (names) names->Add(name, zone());
+ return block;
+}
+
+
Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
if (allow_harmony_scoping() && strict_mode() == STRICT) {
return ParseScopedBlock(labels, ok);
@@ -2200,9 +2303,9 @@ Block* Parser::ParseVariableDeclarations(
// Note that the function does different things depending on
// the number of arguments (1 or 2).
initialize = factory()->NewCallRuntime(
- ast_value_factory_->initialize_const_global_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments, pos);
+ ast_value_factory()->initialize_const_global_string(),
+ Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments,
+ pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
@@ -2219,7 +2322,7 @@ Block* Parser::ParseVariableDeclarations(
// Construct the call to Runtime_InitializeVarGlobal
// and add it to the initialization statement block.
initialize = factory()->NewCallRuntime(
- ast_value_factory_->initialize_var_global_string(),
+ ast_value_factory()->initialize_var_global_string(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments,
pos);
} else {
@@ -2334,25 +2437,22 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// If we have an extension, we allow a native function declaration.
// A native function declaration starts with "native function" with
// no line-terminator between the two words.
- if (extension_ != NULL &&
- peek() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorBeforeNext() &&
- expr != NULL &&
+ if (extension_ != NULL && peek() == Token::FUNCTION &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() && expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->raw_name() ==
- ast_value_factory_->native_string() &&
+ ast_value_factory()->native_string() &&
!scanner()->literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
// Parsed expression statement, or the context-sensitive 'module' keyword.
// Only expect semicolon in the former case.
- if (!FLAG_harmony_modules ||
- peek() != Token::IDENTIFIER ||
+ if (!FLAG_harmony_modules || peek() != Token::IDENTIFIER ||
scanner()->HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
expr->AsVariableProxy()->raw_name() !=
- ast_value_factory_->module_string() ||
+ ast_value_factory()->module_string() ||
scanner()->literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
@@ -2475,7 +2575,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Expression* generator = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
- generator, return_value, Yield::FINAL, loc.beg_pos);
+ generator, return_value, Yield::kFinal, loc.beg_pos);
result = factory()->NewExpressionStatement(yield, loc.beg_pos);
} else {
result = factory()->NewReturnStatement(return_value, loc.beg_pos);
@@ -2774,9 +2874,9 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Variable* iterator = scope_->DeclarationScope()->NewTemporary(
- ast_value_factory_->dot_iterator_string());
+ ast_value_factory()->dot_iterator_string());
Variable* result = scope_->DeclarationScope()->NewTemporary(
- ast_value_factory_->dot_result_string());
+ ast_value_factory()->dot_result_string());
Expression* assign_iterator;
Expression* next_result;
@@ -2792,7 +2892,7 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
{
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
Expression* next_literal = factory()->NewStringLiteral(
- ast_value_factory_->next_string(), RelocInfo::kNoPosition);
+ ast_value_factory()->next_string(), RelocInfo::kNoPosition);
Expression* next_property = factory()->NewProperty(
iterator_proxy, next_literal, RelocInfo::kNoPosition);
ZoneList<Expression*>* next_arguments =
@@ -2807,7 +2907,7 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// result.done
{
Expression* done_literal = factory()->NewStringLiteral(
- ast_value_factory_->done_string(), RelocInfo::kNoPosition);
+ ast_value_factory()->done_string(), RelocInfo::kNoPosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
result_done = factory()->NewProperty(
result_proxy, done_literal, RelocInfo::kNoPosition);
@@ -2816,7 +2916,7 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// each = result.value
{
Expression* value_literal = factory()->NewStringLiteral(
- ast_value_factory_->value_string(), RelocInfo::kNoPosition);
+ ast_value_factory()->value_string(), RelocInfo::kNoPosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
Expression* result_value = factory()->NewProperty(
result_proxy, value_literal, RelocInfo::kNoPosition);
@@ -2879,7 +2979,7 @@ Statement* Parser::DesugarLetBindingsInForStatement(
RelocInfo::kNoPosition);
outer_block->AddStatement(init, zone());
- const AstRawString* temp_name = ast_value_factory_->dot_for_string();
+ const AstRawString* temp_name = ast_value_factory()->dot_for_string();
// For each let variable x:
// make statement: temp_x = x.
@@ -3077,7 +3177,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// TODO(keuchel): Move the temporary variable to the block scope, after
// implementing stack allocated block scoped variables.
Variable* temp = scope_->DeclarationScope()->NewTemporary(
- ast_value_factory_->dot_for_string());
+ ast_value_factory()->dot_for_string());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
@@ -3334,14 +3434,10 @@ int ParserTraits::DeclareArrowParametersFromExpression(
FunctionLiteral* Parser::ParseFunctionLiteral(
- const AstRawString* function_name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_pos,
+ const AstRawString* function_name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok) {
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
//
@@ -3354,6 +3450,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int pos = function_token_pos == RelocInfo::kNoPosition
? peek_position() : function_token_pos;
+ bool is_generator = IsGeneratorFunction(kind);
+
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
// handle to decide whether to invoke function name inference.
@@ -3361,7 +3459,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// We want a non-null handle as the function name.
if (should_infer_name) {
- function_name = ast_value_factory_->empty_string();
+ function_name = ast_value_factory()->empty_string();
}
int num_parameters = 0;
@@ -3415,7 +3513,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Parse function body.
{
FunctionState function_state(&function_state_, &scope_, scope, zone(),
- ast_value_factory_);
+ ast_value_factory(),
+ info()->ast_node_id_gen());
scope_->SetScopeName(function_name);
if (is_generator) {
@@ -3428,7 +3527,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// in a temporary variable, a definition that is used by "yield"
// expressions. This also marks the FunctionState as a generator.
Variable* temp = scope_->DeclarationScope()->NewTemporary(
- ast_value_factory_->dot_generator_object_string());
+ ast_value_factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -3562,7 +3661,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
// Validate strict mode.
- if (strict_mode() == STRICT) {
+ // Concise methods use StrictFormalParameters.
+ if (strict_mode() == STRICT || IsConciseMethod(kind)) {
CheckStrictFunctionNameAndParameters(function_name,
name_is_strict_reserved,
function_name_location,
@@ -3570,6 +3670,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
dupe_error_loc,
reserved_loc,
CHECK_OK);
+ }
+ if (strict_mode() == STRICT) {
CheckOctalLiteral(scope->start_position(),
scope->end_position(),
CHECK_OK);
@@ -3582,11 +3684,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
}
- FunctionLiteral::KindFlag kind = is_generator
- ? FunctionLiteral::kGeneratorFunction
- : FunctionLiteral::kNormalFunction;
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- function_name, ast_value_factory_, scope, body,
+ function_name, ast_value_factory(), scope, body,
materialized_literal_count, expected_property_count, handler_count,
num_parameters, duplicate_parameters, function_type,
FunctionLiteral::kIsFunction, parenthesized, kind, pos);
@@ -3620,8 +3719,7 @@ void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
if (!*ok) {
return;
}
- isolate()->counters()->total_preparse_skipped()->Increment(
- scope_->end_position() - function_block_pos);
+ total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
scope_->SetStrictMode(entry.strict_mode());
@@ -3649,8 +3747,7 @@ void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
if (!*ok) {
return;
}
- isolate()->counters()->total_preparse_skipped()->Increment(
- scope_->end_position() - function_block_pos);
+ total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
scope_->SetStrictMode(logger.strict_mode());
@@ -3691,9 +3788,9 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(0, zone());
CallRuntime* allocation = factory()->NewCallRuntime(
- ast_value_factory_->empty_string(),
- Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
- arguments, pos);
+ ast_value_factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), arguments,
+ pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
@@ -3701,19 +3798,19 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
- get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
+ get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
body->Add(factory()->NewExpressionStatement(
yield, RelocInfo::kNoPosition), zone());
}
- ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
+ ParseSourceElements(body, Token::RBRACE, false, false, NULL, CHECK_OK);
if (is_generator) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Expression* undefined =
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
- Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::FINAL,
+ Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
RelocInfo::kNoPosition);
body->Add(factory()->NewExpressionStatement(
yield, RelocInfo::kNoPosition), zone());
@@ -3728,25 +3825,33 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
SingletonLogger* logger) {
- HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
+ // This function may be called on a background thread too; record only the
+ // main thread preparse times.
+ if (pre_parse_timer_ != NULL) {
+ pre_parse_timer_->Start();
+ }
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
- intptr_t stack_limit = isolate()->stack_guard()->real_climit();
- reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit);
+ reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit_);
reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
reusable_preparser_->set_allow_modules(allow_modules());
reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
reusable_preparser_->set_allow_lazy(true);
- reusable_preparser_->set_allow_generators(allow_generators());
reusable_preparser_->set_allow_arrow_functions(allow_arrow_functions());
reusable_preparser_->set_allow_harmony_numeric_literals(
allow_harmony_numeric_literals());
+ reusable_preparser_->set_allow_classes(allow_classes());
+ reusable_preparser_->set_allow_harmony_object_literals(
+ allow_harmony_object_literals());
}
PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(strict_mode(),
is_generator(),
logger);
+ if (pre_parse_timer_ != NULL) {
+ pre_parse_timer_->Stop();
+ }
return result;
}
@@ -3899,10 +4004,9 @@ void Parser::HandleSourceURLComments() {
void Parser::ThrowPendingError() {
- DCHECK(ast_value_factory_->IsInternalized());
+ DCHECK(ast_value_factory()->IsInternalized());
if (has_pending_error_) {
- MessageLocation location(script_,
- pending_error_location_.beg_pos,
+ MessageLocation location(script(), pending_error_location_.beg_pos,
pending_error_location_.end_pos);
Factory* factory = isolate()->factory();
bool has_arg =
@@ -3917,24 +4021,41 @@ void Parser::ThrowPendingError() {
.ToHandleChecked();
elements->set(0, *arg_string);
}
- isolate()->debug()->OnCompileError(script_);
+ isolate()->debug()->OnCompileError(script());
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = pending_error_is_reference_error_
- ? factory->NewReferenceError(pending_error_message_, array)
- : factory->NewSyntaxError(pending_error_message_, array);
- isolate()->Throw(*result, &location);
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error =
+ pending_error_is_reference_error_
+ ? factory->NewReferenceError(pending_error_message_, array)
+ : factory->NewSyntaxError(pending_error_message_, array);
+ if (maybe_error.ToHandle(&error)) isolate()->Throw(*error, &location);
}
}
-void Parser::InternalizeUseCounts() {
+void Parser::Internalize() {
+ // Internalize strings.
+ ast_value_factory()->Internalize(isolate());
+
+ // Error processing.
+ if (info()->function() == NULL) {
+ if (stack_overflow()) {
+ isolate()->StackOverflow();
+ } else {
+ ThrowPendingError();
+ }
+ }
+
+ // Move statistics to Isolate.
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
for (int i = 0; i < use_counts_[feature]; ++i) {
isolate()->CountUsage(v8::Isolate::UseCounterFeature(feature));
}
}
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ total_preparse_skipped_);
}
@@ -4772,16 +4893,12 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool Parser::Parse() {
DCHECK(info()->function() == NULL);
FunctionLiteral* result = NULL;
- ast_value_factory_ = info()->ast_value_factory();
- if (ast_value_factory_ == NULL) {
- ast_value_factory_ =
- new AstValueFactory(zone(), isolate()->heap()->HashSeed());
- }
- if (allow_natives_syntax() || extension_ != NULL) {
+ pre_parse_timer_ = isolate()->counters()->pre_parse();
+ if (FLAG_trace_parse || allow_natives_syntax() || extension_ != NULL) {
// If intrinsics are allowed, the Parser cannot operate independent of the
- // V8 heap because of Rumtime. Tell the string table to internalize strings
+ // V8 heap because of Runtime. Tell the string table to internalize strings
// and values right after they're created.
- ast_value_factory_->Internalize(isolate());
+ ast_value_factory()->Internalize(isolate());
}
if (info()->is_lazy()) {
@@ -4796,16 +4913,52 @@ bool Parser::Parse() {
result = ParseProgram();
}
info()->SetFunction(result);
- DCHECK(ast_value_factory_->IsInternalized());
- // info takes ownership of ast_value_factory_.
- if (info()->ast_value_factory() == NULL) {
- info()->SetAstValueFactory(ast_value_factory_);
- }
- ast_value_factory_ = NULL;
-
- InternalizeUseCounts();
+ Internalize();
+ DCHECK(ast_value_factory()->IsInternalized());
return (result != NULL);
}
+
+void Parser::ParseOnBackground() {
+ DCHECK(info()->function() == NULL);
+ FunctionLiteral* result = NULL;
+ fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
+
+ CompleteParserRecorder recorder;
+ if (compile_options() == ScriptCompiler::kProduceParserCache) {
+ log_ = &recorder;
+ }
+
+ DCHECK(info()->source_stream() != NULL);
+ ExternalStreamingStream stream(info()->source_stream(),
+ info()->source_stream_encoding());
+ scanner_.Initialize(&stream);
+ DCHECK(info()->context().is_null() || info()->context()->IsNativeContext());
+
+ // When streaming, we don't know the length of the source until we have parsed
+ // it. The raw data can be UTF-8, so we wouldn't know the source length until
+ // we have decoded it anyway even if we knew the raw data length (which we
+ // don't). We work around this by storing all the scopes which need their end
+ // position set at the end of the script (the top scope and possible eval
+ // scopes) and set their end position after we know the script length.
+ Scope* top_scope = NULL;
+ Scope* eval_scope = NULL;
+ result = DoParseProgram(info(), &top_scope, &eval_scope);
+
+ top_scope->set_end_position(scanner()->location().end_pos);
+ if (eval_scope != NULL) {
+ eval_scope->set_end_position(scanner()->location().end_pos);
+ }
+
+ info()->SetFunction(result);
+
+ // We cannot internalize on a background thread; a foreground task will take
+ // care of calling Parser::Internalize just before compilation.
+
+ if (compile_options() == ScriptCompiler::kProduceParserCache) {
+ if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
+ log_ = NULL;
+ }
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index e3cee84097..40886f669d 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -355,21 +355,6 @@ class ParserTraits {
typedef Variable GeneratorVariable;
typedef v8::internal::Zone Zone;
- class Checkpoint BASE_EMBEDDED {
- public:
- template <typename Parser>
- explicit Checkpoint(Parser* parser) {
- isolate_ = parser->zone()->isolate();
- saved_ast_node_id_ = isolate_->ast_node_id();
- }
-
- void Restore() { isolate_->set_ast_node_id(saved_ast_node_id_); }
-
- private:
- Isolate* isolate_;
- int saved_ast_node_id_;
- };
-
typedef v8::internal::AstProperties AstProperties;
typedef Vector<VariableProxy*> ParameterIdentifierVector;
@@ -378,6 +363,7 @@ class ParserTraits {
typedef v8::internal::Expression* Expression;
typedef Yield* YieldExpression;
typedef v8::internal::FunctionLiteral* FunctionLiteral;
+ typedef v8::internal::ClassLiteral* ClassLiteral;
typedef v8::internal::Literal* Literal;
typedef ObjectLiteral::Property* ObjectLiteralProperty;
typedef ZoneList<v8::internal::Expression*>* ExpressionList;
@@ -388,20 +374,22 @@ class ParserTraits {
typedef AstNodeFactory<AstConstructionVisitor> Factory;
};
+ class Checkpoint;
+
explicit ParserTraits(Parser* parser) : parser_(parser) {}
// Custom operations executed when FunctionStates are created and destructed.
- template<typename FunctionState>
- static void SetUpFunctionState(FunctionState* function_state, Zone* zone) {
- Isolate* isolate = zone->isolate();
- function_state->saved_ast_node_id_ = isolate->ast_node_id();
- isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+ template <typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state) {
+ function_state->saved_id_gen_ = *function_state->ast_node_id_gen_;
+ *function_state->ast_node_id_gen_ =
+ AstNode::IdGen(BailoutId::FirstUsable().ToInt());
}
- template<typename FunctionState>
- static void TearDownFunctionState(FunctionState* function_state, Zone* zone) {
+ template <typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state) {
if (function_state->outer_function_state_ != NULL) {
- zone->isolate()->set_ast_node_id(function_state->saved_ast_node_id_);
+ *function_state->ast_node_id_gen_ = function_state->saved_id_gen_;
}
}
@@ -414,6 +402,10 @@ class ParserTraits {
static bool IsIdentifier(Expression* expression);
+ bool IsPrototype(const AstRawString* identifier) const;
+
+ bool IsConstructor(const AstRawString* identifier) const;
+
static const AstRawString* AsIdentifier(Expression* expression) {
DCHECK(IsIdentifier(expression));
return expression->AsVariableProxy()->raw_name();
@@ -439,7 +431,8 @@ class ParserTraits {
}
static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- Scope* scope, Expression* value, bool* has_function) {
+ Scope* scope, ObjectLiteralProperty* property, bool* has_function) {
+ Expression* value = property->value();
if (scope->DeclarationScope()->is_global_scope() &&
value->AsFunctionLiteral() != NULL) {
*has_function = true;
@@ -529,6 +522,8 @@ class ParserTraits {
static Literal* EmptyLiteral() {
return NULL;
}
+ static ObjectLiteralProperty* EmptyObjectLiteralProperty() { return NULL; }
+ static FunctionLiteral* EmptyFunctionLiteral() { return NULL; }
// Used in error return values.
static ZoneList<Expression*>* NullExpressionList() {
@@ -545,10 +540,20 @@ class ParserTraits {
// Producing data during the recursive descent.
const AstRawString* GetSymbol(Scanner* scanner);
const AstRawString* GetNextSymbol(Scanner* scanner);
+ const AstRawString* GetNumberAsSymbol(Scanner* scanner);
Expression* ThisExpression(Scope* scope,
AstNodeFactory<AstConstructionVisitor>* factory,
int pos = RelocInfo::kNoPosition);
+ Expression* SuperReference(Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory,
+ int pos = RelocInfo::kNoPosition);
+ Expression* ClassLiteral(const AstRawString* name, Expression* extends,
+ Expression* constructor,
+ ZoneList<ObjectLiteral::Property*>* properties,
+ int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
Literal* ExpressionFromLiteral(
Token::Value token, int pos, Scanner* scanner,
AstNodeFactory<AstConstructionVisitor>* factory);
@@ -580,14 +585,10 @@ class ParserTraits {
// Temporary glue; these functions will move to ParserBase.
Expression* ParseV8Intrinsic(bool* ok);
FunctionLiteral* ParseFunctionLiteral(
- const AstRawString* name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok);
+ const AstRawString* name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
V8_INLINE void SkipLazyFunctionBody(const AstRawString* name,
int* materialized_literal_count,
int* expected_property_count, bool* ok);
@@ -604,7 +605,16 @@ class ParserTraits {
class Parser : public ParserBase<ParserTraits> {
public:
- explicit Parser(CompilationInfo* info);
+ // Note that the hash seed in ParseInfo must be the hash seed from the
+ // Isolate's heap, otherwise the heap will be in an inconsistent state once
+ // the strings created by the Parser are internalized.
+ struct ParseInfo {
+ uintptr_t stack_limit;
+ uint32_t hash_seed;
+ UnicodeCache* unicode_cache;
+ };
+
+ Parser(CompilationInfo* info, ParseInfo* parse_info);
~Parser() {
delete reusable_preparser_;
reusable_preparser_ = NULL;
@@ -617,11 +627,23 @@ class Parser : public ParserBase<ParserTraits> {
// nodes) if parsing failed.
static bool Parse(CompilationInfo* info,
bool allow_lazy = false) {
- Parser parser(info);
+ ParseInfo parse_info = {info->isolate()->stack_guard()->real_climit(),
+ info->isolate()->heap()->HashSeed(),
+ info->isolate()->unicode_cache()};
+ Parser parser(info, &parse_info);
parser.set_allow_lazy(allow_lazy);
- return parser.Parse();
+ if (parser.Parse()) {
+ info->SetStrictMode(info->function()->strict_mode());
+ return true;
+ }
+ return false;
}
bool Parse();
+ void ParseOnBackground();
+
+ // Handle errors detected during parsing, move statistics to Isolate,
+ // internalize strings (move them to the heap).
+ void Internalize();
private:
friend class ParserTraits;
@@ -654,12 +676,16 @@ class Parser : public ParserBase<ParserTraits> {
FunctionLiteral* ParseLazy();
FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
- Isolate* isolate() { return isolate_; }
+ Isolate* isolate() { return info_->isolate(); }
CompilationInfo* info() const { return info_; }
+ Handle<Script> script() const { return info_->script(); }
+ AstValueFactory* ast_value_factory() const {
+ return info_->ast_value_factory();
+ }
// Called by ParseProgram after setting up the scanner.
- FunctionLiteral* DoParseProgram(CompilationInfo* info,
- Handle<String> source);
+ FunctionLiteral* DoParseProgram(CompilationInfo* info, Scope** scope,
+ Scope** ad_hoc_eval_scope);
void SetCachedData();
@@ -677,7 +703,8 @@ class Parser : public ParserBase<ParserTraits> {
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
void* ParseSourceElements(ZoneList<Statement*>* processor, int end_token,
- bool is_eval, bool is_global, bool* ok);
+ bool is_eval, bool is_global,
+ Scope** ad_hoc_eval_scope, bool* ok);
Statement* ParseModuleElement(ZoneList<const AstRawString*>* labels,
bool* ok);
Statement* ParseModuleDeclaration(ZoneList<const AstRawString*>* names,
@@ -694,6 +721,8 @@ class Parser : public ParserBase<ParserTraits> {
Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok);
Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
bool* ok);
+ Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+ bool* ok);
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
Block* ParseVariableStatement(VariableDeclarationContext var_context,
@@ -741,14 +770,10 @@ class Parser : public ParserBase<ParserTraits> {
Statement* body, bool* ok);
FunctionLiteral* ParseFunctionLiteral(
- const AstRawString* name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok);
+ const AstRawString* name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
@@ -804,17 +829,11 @@ class Parser : public ParserBase<ParserTraits> {
void ThrowPendingError();
- void InternalizeUseCounts();
-
- Isolate* isolate_;
-
- Handle<Script> script_;
Scanner scanner_;
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
Target* target_stack_; // for break, continue statements
ParseData* cached_parse_data_;
- AstValueFactory* ast_value_factory_;
CompilationInfo* info_;
@@ -826,7 +845,11 @@ class Parser : public ParserBase<ParserTraits> {
const char* pending_error_char_arg_;
bool pending_error_is_reference_error_;
+ // Other information which will be stored in Parser and moved to Isolate after
+ // parsing.
int use_counts_[v8::Isolate::kUseCounterFeatureCount];
+ int total_preparse_skipped_;
+ HistogramTimer* pre_parse_timer_;
};
@@ -843,7 +866,7 @@ Scope* ParserTraits::NewScope(Scope* parent_scope, ScopeType scope_type) {
const AstRawString* ParserTraits::EmptyIdentifierString() {
- return parser_->ast_value_factory_->empty_string();
+ return parser_->ast_value_factory()->empty_string();
}
@@ -870,7 +893,7 @@ void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
AstValueFactory* ParserTraits::ast_value_factory() {
- return parser_->ast_value_factory_;
+ return parser_->ast_value_factory();
}
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 5e7fd4cf4a..3f30e38467 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -83,7 +83,8 @@ void PerfJitLogger::LogRecordedBuffer(Code* code, SharedFunctionInfo*,
const char* code_name = name;
uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
- uint32_t code_size = code->instruction_size();
+ uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
+ : code->instruction_size();
static const char string_terminator[] = "\0";
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 7ce8e3d91a..3173cc0f90 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -32,6 +32,12 @@ int isfinite(double value);
namespace v8 {
namespace internal {
+class PreParserTraits::Checkpoint
+ : public ParserBase<PreParserTraits>::CheckpointBase {
+ public:
+ explicit Checkpoint(ParserBase<PreParserTraits>* parser)
+ : ParserBase<PreParserTraits>::CheckpointBase(parser) {}
+};
void PreParserTraits::ReportMessageAt(Scanner::Location location,
const char* message,
@@ -72,6 +78,17 @@ PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
if (scanner->UnescapedLiteralMatches("arguments", 9)) {
return PreParserIdentifier::Arguments();
}
+ if (scanner->UnescapedLiteralMatches("prototype", 9)) {
+ return PreParserIdentifier::Prototype();
+ }
+ if (scanner->UnescapedLiteralMatches("constructor", 11)) {
+ return PreParserIdentifier::Constructor();
+ }
+ return PreParserIdentifier::Default();
+}
+
+
+PreParserIdentifier PreParserTraits::GetNumberAsSymbol(Scanner* scanner) {
return PreParserIdentifier::Default();
}
@@ -91,16 +108,12 @@ PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
PreParserExpression PreParserTraits::ParseFunctionLiteral(
- PreParserIdentifier name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok) {
+ PreParserIdentifier name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
return pre_parser_->ParseFunctionLiteral(
- name, function_name_location, name_is_strict_reserved, is_generator,
+ name, function_name_location, name_is_strict_reserved, kind,
function_token_position, type, arity_restriction, ok);
}
@@ -171,6 +184,8 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
switch (peek()) {
case Token::FUNCTION:
return ParseFunctionDeclaration(ok);
+ case Token::CLASS:
+ return ParseClassDeclaration(ok);
case Token::CONST:
return ParseVariableStatement(kSourceElement, ok);
case Token::LET:
@@ -298,6 +313,9 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
}
}
+ case Token::CLASS:
+ return ParseClassDeclaration(CHECK_OK);
+
case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -325,22 +343,31 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int pos = position();
- bool is_generator = allow_generators() && Check(Token::MUL);
+ bool is_generator = Check(Token::MUL);
bool is_strict_reserved = false;
Identifier name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
- ParseFunctionLiteral(name,
- scanner()->location(),
- is_strict_reserved,
- is_generator,
- pos,
- FunctionLiteral::DECLARATION,
- FunctionLiteral::NORMAL_ARITY,
- CHECK_OK);
+ ParseFunctionLiteral(name, scanner()->location(), is_strict_reserved,
+ is_generator ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction,
+ pos, FunctionLiteral::DECLARATION,
+ FunctionLiteral::NORMAL_ARITY, CHECK_OK);
return Statement::FunctionDeclaration();
}
+PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
+ Expect(Token::CLASS, CHECK_OK);
+ int pos = position();
+ bool is_strict_reserved = false;
+ Identifier name =
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+ ParseClassLiteral(name, scanner()->location(), is_strict_reserved, pos,
+ CHECK_OK);
+ return Statement::Default();
+}
+
+
PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Block ::
// '{' Statement* '}'
@@ -794,14 +821,10 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
PreParser::Expression PreParser::ParseFunctionLiteral(
- Identifier function_name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_pos,
+ Identifier function_name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok) {
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -810,7 +833,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
PreParserScope function_scope(scope_, FUNCTION_SCOPE);
FunctionState function_state(&function_state_, &scope_, &function_scope, NULL,
this->ast_value_factory());
- function_state.set_is_generator(is_generator);
+ function_state.set_is_generator(IsGeneratorFunction(kind));
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -865,7 +888,8 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Validate strict mode. We can do this only after parsing the function,
// since the function can declare itself strict.
- if (strict_mode() == STRICT) {
+ // Concise methods use StrictFormalParameters.
+ if (strict_mode() == STRICT || IsConciseMethod(kind)) {
if (function_name.IsEvalOrArguments()) {
ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 8a93258258..78f6a269f0 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -7,6 +7,7 @@
#include "src/v8.h"
+#include "src/bailout-reason.h"
#include "src/func-name-inferrer.h"
#include "src/hashmap.h"
#include "src/scanner.h"
@@ -30,7 +31,7 @@ namespace internal {
// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
// used.
-// - Miscellanous other tasks interleaved with the recursive descent. For
+// - Miscellaneous other tasks interleaved with the recursive descent. For
// example, Parser keeps track of which function literals should be marked as
// pretenured, and PreParser doesn't care.
@@ -46,6 +47,7 @@ namespace internal {
// typedef Identifier;
// typedef Expression;
// typedef FunctionLiteral;
+// typedef ClassLiteral;
// typedef ObjectLiteralProperty;
// typedef Literal;
// typedef ExpressionList;
@@ -63,9 +65,12 @@ class ParserBase : public Traits {
typedef typename Traits::Type::Expression ExpressionT;
typedef typename Traits::Type::Identifier IdentifierT;
typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
+ typedef typename Traits::Type::Literal LiteralT;
+ typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
ParserBase(Scanner* scanner, uintptr_t stack_limit, v8::Extension* extension,
ParserRecorder* log, typename Traits::Type::Zone* zone,
+ AstNode::IdGen* ast_node_id_gen,
typename Traits::Type::Parser this_object)
: Traits(this_object),
parenthesized_function_(false),
@@ -75,32 +80,35 @@ class ParserBase : public Traits {
fni_(NULL),
log_(log),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
- scanner_(scanner),
stack_limit_(stack_limit),
+ scanner_(scanner),
stack_overflow_(false),
allow_lazy_(false),
allow_natives_syntax_(false),
- allow_generators_(false),
allow_arrow_functions_(false),
- zone_(zone) {}
+ allow_harmony_object_literals_(false),
+ zone_(zone),
+ ast_node_id_gen_(ast_node_id_gen) {}
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
bool allow_lazy() const { return allow_lazy_; }
bool allow_natives_syntax() const { return allow_natives_syntax_; }
- bool allow_generators() const { return allow_generators_; }
bool allow_arrow_functions() const { return allow_arrow_functions_; }
bool allow_modules() const { return scanner()->HarmonyModules(); }
bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
bool allow_harmony_numeric_literals() const {
return scanner()->HarmonyNumericLiterals();
}
+ bool allow_classes() const { return scanner()->HarmonyClasses(); }
+ bool allow_harmony_object_literals() const {
+ return allow_harmony_object_literals_;
+ }
// Setters that determine whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
- void set_allow_generators(bool allow) { allow_generators_ = allow; }
void set_allow_arrow_functions(bool allow) { allow_arrow_functions_ = allow; }
void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
void set_allow_harmony_scoping(bool allow) {
@@ -109,9 +117,13 @@ class ParserBase : public Traits {
void set_allow_harmony_numeric_literals(bool allow) {
scanner()->SetHarmonyNumericLiterals(allow);
}
+ void set_allow_classes(bool allow) { scanner()->SetHarmonyClasses(allow); }
+ void set_allow_harmony_object_literals(bool allow) {
+ allow_harmony_object_literals_ = allow;
+ }
protected:
- friend class Traits::Type::Checkpoint;
+ friend class Traits::Checkpoint;
enum AllowEvalOrArgumentsAsIdentifier {
kAllowEvalOrArguments,
@@ -123,7 +135,8 @@ class ParserBase : public Traits {
PARSE_EAGERLY
};
- class ParserCheckpoint;
+ class CheckpointBase;
+ class ObjectLiteralChecker;
// ---------------------------------------------------------------------------
// FunctionState and BlockState together implement the parser's scope stack.
@@ -149,17 +162,18 @@ class ParserBase : public Traits {
class FunctionState BASE_EMBEDDED {
public:
- FunctionState(
- FunctionState** function_state_stack,
- typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope,
- typename Traits::Type::Zone* zone = NULL,
- AstValueFactory* ast_value_factory = NULL);
+ FunctionState(FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* zone = NULL,
+ AstValueFactory* ast_value_factory = NULL,
+ AstNode::IdGen* ast_node_id_gen = NULL);
FunctionState(FunctionState** function_state_stack,
typename Traits::Type::Scope** scope_stack,
typename Traits::Type::Scope** scope,
typename Traits::Type::Zone* zone = NULL,
- AstValueFactory* ast_value_factory = NULL);
+ AstValueFactory* ast_value_factory = NULL,
+ AstNode::IdGen* ast_node_id_gen = NULL);
~FunctionState();
int NextMaterializedLiteralIndex() {
@@ -215,23 +229,22 @@ class ParserBase : public Traits {
FunctionState* outer_function_state_;
typename Traits::Type::Scope** scope_stack_;
typename Traits::Type::Scope* outer_scope_;
- int saved_ast_node_id_; // Only used by ParserTraits.
+ AstNode::IdGen* ast_node_id_gen_; // Only used by ParserTraits.
+ AstNode::IdGen saved_id_gen_; // Ditto.
typename Traits::Type::Zone* extra_param_;
typename Traits::Type::Factory factory_;
friend class ParserTraits;
- friend class ParserCheckpoint;
+ friend class CheckpointBase;
};
// Annoyingly, arrow functions first parse as comma expressions, then when we
// see the => we have to go back and reinterpret the arguments as being formal
// parameters. To do so we need to reset some of the parser state back to
// what it was before the arguments were first seen.
- class ParserCheckpoint : public Traits::Type::Checkpoint {
+ class CheckpointBase BASE_EMBEDDED {
public:
- template <typename Parser>
- explicit ParserCheckpoint(Parser* parser)
- : Traits::Type::Checkpoint(parser) {
+ explicit CheckpointBase(ParserBase* parser) {
function_state_ = parser->function_state_;
next_materialized_literal_index_ =
function_state_->next_materialized_literal_index_;
@@ -240,7 +253,6 @@ class ParserBase : public Traits {
}
void Restore() {
- Traits::Type::Checkpoint::Restore();
function_state_->next_materialized_literal_index_ =
next_materialized_literal_index_;
function_state_->next_handler_index_ = next_handler_index_;
@@ -277,6 +289,7 @@ class ParserBase : public Traits {
void set_stack_overflow() { stack_overflow_ = true; }
Mode mode() const { return mode_; }
typename Traits::Type::Zone* zone() const { return zone_; }
+ AstNode::IdGen* ast_node_id_gen() const { return ast_node_id_gen_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -469,7 +482,12 @@ class ParserBase : public Traits {
ExpressionT ParsePrimaryExpression(bool* ok);
ExpressionT ParseExpression(bool accept_IN, bool* ok);
ExpressionT ParseArrayLiteral(bool* ok);
+ IdentifierT ParsePropertyName(bool* is_get, bool* is_set, bool* is_static,
+ bool* ok);
ExpressionT ParseObjectLiteral(bool* ok);
+ ObjectLiteralPropertyT ParsePropertyDefinition(ObjectLiteralChecker* checker,
+ bool in_class, bool is_static,
+ bool* ok);
typename Traits::Type::ExpressionList ParseArguments(bool* ok);
ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
ExpressionT ParseYieldExpression(bool* ok);
@@ -484,6 +502,10 @@ class ParserBase : public Traits {
bool* ok);
ExpressionT ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast,
bool* ok);
+ ExpressionT ParseClassLiteral(IdentifierT name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok);
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
@@ -513,13 +535,13 @@ class ParserBase : public Traits {
kValueFlag = 4
};
- // Validation per ECMA 262 - 11.1.5 "Object Initialiser".
+ // Validation per ECMA 262 - 11.1.5 "Object Initializer".
class ObjectLiteralChecker {
public:
ObjectLiteralChecker(ParserBase* parser, StrictMode strict_mode)
: parser_(parser),
finder_(scanner()->unicode_cache()),
- strict_mode_(strict_mode) { }
+ strict_mode_(strict_mode) {}
void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
@@ -558,18 +580,19 @@ class ParserBase : public Traits {
FuncNameInferrer* fni_;
ParserRecorder* log_;
Mode mode_;
+ uintptr_t stack_limit_;
private:
Scanner* scanner_;
- uintptr_t stack_limit_;
bool stack_overflow_;
bool allow_lazy_;
bool allow_natives_syntax_;
- bool allow_generators_;
bool allow_arrow_functions_;
+ bool allow_harmony_object_literals_;
typename Traits::Type::Zone* zone_; // Only used by Parser.
+ AstNode::IdGen* ast_node_id_gen_;
};
@@ -597,10 +620,20 @@ class PreParserIdentifier {
static PreParserIdentifier Yield() {
return PreParserIdentifier(kYieldIdentifier);
}
+ static PreParserIdentifier Prototype() {
+ return PreParserIdentifier(kPrototypeIdentifier);
+ }
+ static PreParserIdentifier Constructor() {
+ return PreParserIdentifier(kConstructorIdentifier);
+ }
bool IsEval() const { return type_ == kEvalIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() const { return type_ >= kEvalIdentifier; }
bool IsYield() const { return type_ == kYieldIdentifier; }
+ bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
+ bool IsConstructor() const { return type_ == kConstructorIdentifier; }
+ bool IsEvalOrArguments() const {
+ return type_ == kEvalIdentifier || type_ == kArgumentsIdentifier;
+ }
bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
bool IsFutureStrictReserved() const {
return type_ == kFutureStrictReservedIdentifier;
@@ -623,7 +656,9 @@ class PreParserIdentifier {
kLetIdentifier,
kYieldIdentifier,
kEvalIdentifier,
- kArgumentsIdentifier
+ kArgumentsIdentifier,
+ kPrototypeIdentifier,
+ kConstructorIdentifier
};
explicit PreParserIdentifier(Type type) : type_(type) {}
Type type_;
@@ -677,6 +712,10 @@ class PreParserExpression {
return PreParserExpression(kThisExpression);
}
+ static PreParserExpression Super() {
+ return PreParserExpression(kSuperExpression);
+ }
+
static PreParserExpression ThisProperty() {
return PreParserExpression(kThisPropertyExpression);
}
@@ -798,7 +837,8 @@ class PreParserExpression {
kThisExpression = (1 << 4),
kThisPropertyExpression = (2 << 4),
kPropertyExpression = (3 << 4),
- kCallExpression = (4 << 4)
+ kCallExpression = (4 << 4),
+ kSuperExpression = (5 << 4)
};
explicit PreParserExpression(int expression_code) : code_(expression_code) {}
@@ -907,6 +947,7 @@ class PreParserScope {
ScopeType type() { return scope_type_; }
StrictMode strict_mode() const { return strict_mode_; }
void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+ void SetScopeName(PreParserIdentifier name) {}
// When PreParser is in use, lazy compilation is already being done,
// things cannot get lazier than that.
@@ -929,7 +970,7 @@ class PreParserScope {
class PreParserFactory {
public:
- explicit PreParserFactory(void* extra_param1, void* extra_param2) {}
+ PreParserFactory(void*, void*, void*) {}
PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
int pos) {
return PreParserExpression::Default();
@@ -951,11 +992,12 @@ class PreParserFactory {
}
PreParserExpression NewObjectLiteralProperty(bool is_getter,
PreParserExpression value,
- int pos) {
+ int pos, bool is_static) {
return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
- PreParserExpression value) {
+ PreParserExpression value,
+ bool is_static) {
return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
@@ -965,7 +1007,7 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewVariableProxy(void* generator_variable) {
+ PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
}
PreParserExpression NewProperty(PreParserExpression obj,
@@ -1037,8 +1079,15 @@ class PreParserFactory {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
- FunctionLiteral::IsParenthesizedFlag is_parenthesized,
- FunctionLiteral::KindFlag kind, int position) {
+ FunctionLiteral::IsParenthesizedFlag is_parenthesized, FunctionKind kind,
+ int position) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewClassLiteral(PreParserIdentifier name,
+ PreParserExpression extends,
+ PreParserExpression constructor,
+ PreParserExpressionList properties,
+ int position) {
return PreParserExpression::Default();
}
@@ -1066,13 +1115,6 @@ class PreParserTraits {
typedef PreParserScope Scope;
typedef PreParserScope ScopePtr;
- class Checkpoint BASE_EMBEDDED {
- public:
- template <typename Parser>
- explicit Checkpoint(Parser* parser) {}
- void Restore() {}
- };
-
// PreParser doesn't need to store generator variables.
typedef void GeneratorVariable;
// No interaction with Zones.
@@ -1086,6 +1128,7 @@ class PreParserTraits {
typedef PreParserExpression Expression;
typedef PreParserExpression YieldExpression;
typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ClassLiteral;
typedef PreParserExpression ObjectLiteralProperty;
typedef PreParserExpression Literal;
typedef PreParserExpressionList ExpressionList;
@@ -1096,20 +1139,30 @@ class PreParserTraits {
typedef PreParserFactory Factory;
};
+ class Checkpoint;
+
explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
// Custom operations executed when FunctionStates are created and
// destructed. (The PreParser doesn't need to do anything.)
- template<typename FunctionState>
- static void SetUpFunctionState(FunctionState* function_state, void*) {}
- template<typename FunctionState>
- static void TearDownFunctionState(FunctionState* function_state, void*) {}
+ template <typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state) {}
+ template <typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state) {}
// Helper functions for recursive descent.
static bool IsEvalOrArguments(PreParserIdentifier identifier) {
return identifier.IsEvalOrArguments();
}
+ static bool IsPrototype(PreParserIdentifier identifier) {
+ return identifier.IsPrototype();
+ }
+
+ static bool IsConstructor(PreParserIdentifier identifier) {
+ return identifier.IsConstructor();
+ }
+
// Returns true if the expression is of type "this.foo".
static bool IsThisProperty(PreParserExpression expression) {
return expression.IsThisProperty();
@@ -1154,7 +1207,8 @@ class PreParserTraits {
}
static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- PreParserScope* scope, PreParserExpression value, bool* has_function) {}
+ PreParserScope* scope, PreParserExpression property, bool* has_function) {
+ }
static void CheckAssigningFunctionLiteralToProperty(
PreParserExpression left, PreParserExpression right) {}
@@ -1226,6 +1280,12 @@ class PreParserTraits {
static PreParserExpression EmptyLiteral() {
return PreParserExpression::Default();
}
+ static PreParserExpression EmptyObjectLiteralProperty() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyFunctionLiteral() {
+ return PreParserExpression::Default();
+ }
static PreParserExpressionList NullExpressionList() {
return PreParserExpressionList();
}
@@ -1238,6 +1298,7 @@ class PreParserTraits {
// Producing data during the recursive descent.
PreParserIdentifier GetSymbol(Scanner* scanner);
+ PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
return PreParserIdentifier::Default();
@@ -1248,6 +1309,20 @@ class PreParserTraits {
return PreParserExpression::This();
}
+ static PreParserExpression SuperReference(PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::Super();
+ }
+
+ static PreParserExpression ClassLiteral(PreParserIdentifier name,
+ PreParserExpression extends,
+ PreParserExpression constructor,
+ PreParserExpressionList properties,
+ int position,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
static PreParserExpression ExpressionFromLiteral(
Token::Value token, int pos, Scanner* scanner,
PreParserFactory* factory) {
@@ -1309,14 +1384,10 @@ class PreParserTraits {
// Temporary glue; these functions will move to ParserBase.
PreParserExpression ParseV8Intrinsic(bool* ok);
PreParserExpression ParseFunctionLiteral(
- PreParserIdentifier name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok);
+ PreParserIdentifier name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
private:
PreParser* pre_parser_;
@@ -1347,7 +1418,7 @@ class PreParser : public ParserBase<PreParserTraits> {
};
PreParser(Scanner* scanner, ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL,
+ : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL, NULL,
this) {}
// Pre-parse the program from the character stream; returns true on
@@ -1414,6 +1485,7 @@ class PreParser : public ParserBase<PreParserTraits> {
SourceElements ParseSourceElements(int end_token, bool* ok);
Statement ParseStatement(bool* ok);
Statement ParseFunctionDeclaration(bool* ok);
+ Statement ParseClassDeclaration(bool* ok);
Statement ParseBlock(bool* ok);
Statement ParseVariableStatement(VariableDeclarationContext var_context,
bool* ok);
@@ -1447,14 +1519,10 @@ class PreParser : public ParserBase<PreParserTraits> {
bool is_generator, bool* ok);
Expression ParseFunctionLiteral(
- Identifier name,
- Scanner::Location function_name_location,
- bool name_is_strict_reserved,
- bool is_generator,
- int function_token_pos,
+ Identifier name, Scanner::Location function_name_location,
+ bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
- bool* ok);
+ FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok);
bool CheckInOrOf(bool accept_OF);
@@ -1482,13 +1550,12 @@ PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
}
-template<class Traits>
+template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack,
typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope,
- typename Traits::Type::Zone* extra_param,
- AstValueFactory* ast_value_factory)
+ typename Traits::Type::Scope* scope, typename Traits::Type::Zone* zone,
+ AstValueFactory* ast_value_factory, AstNode::IdGen* ast_node_id_gen)
: next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
next_handler_index_(0),
expected_property_count_(0),
@@ -1498,12 +1565,11 @@ ParserBase<Traits>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_stack_(scope_stack),
outer_scope_(*scope_stack),
- saved_ast_node_id_(0),
- extra_param_(extra_param),
- factory_(extra_param, ast_value_factory) {
+ ast_node_id_gen_(ast_node_id_gen),
+ factory_(zone, ast_value_factory, ast_node_id_gen) {
*scope_stack_ = scope;
*function_state_stack = this;
- Traits::SetUpFunctionState(this, extra_param);
+ Traits::SetUpFunctionState(this);
}
@@ -1511,9 +1577,8 @@ template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack,
typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope** scope,
- typename Traits::Type::Zone* extra_param,
- AstValueFactory* ast_value_factory)
+ typename Traits::Type::Scope** scope, typename Traits::Type::Zone* zone,
+ AstValueFactory* ast_value_factory, AstNode::IdGen* ast_node_id_gen)
: next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
next_handler_index_(0),
expected_property_count_(0),
@@ -1523,12 +1588,11 @@ ParserBase<Traits>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_stack_(scope_stack),
outer_scope_(*scope_stack),
- saved_ast_node_id_(0),
- extra_param_(extra_param),
- factory_(extra_param, ast_value_factory) {
+ ast_node_id_gen_(ast_node_id_gen),
+ factory_(zone, ast_value_factory, ast_node_id_gen) {
*scope_stack_ = *scope;
*function_state_stack = this;
- Traits::SetUpFunctionState(this, extra_param);
+ Traits::SetUpFunctionState(this);
}
@@ -1536,7 +1600,7 @@ template <class Traits>
ParserBase<Traits>::FunctionState::~FunctionState() {
*scope_stack_ = outer_scope_;
*function_state_stack_ = outer_function_state_;
- Traits::TearDownFunctionState(this, extra_param_);
+ Traits::TearDownFunctionState(this);
}
@@ -1695,6 +1759,7 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
// ArrayLiteral
// ObjectLiteral
// RegExpLiteral
+ // ClassLiteral
// '(' Expression ')'
int pos = peek_position();
@@ -1765,6 +1830,23 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
}
break;
+ case Token::CLASS: {
+ Consume(Token::CLASS);
+ int class_token_position = position();
+ IdentifierT name = this->EmptyIdentifier();
+ bool is_strict_reserved_name = false;
+ Scanner::Location class_name_location = Scanner::Location::invalid();
+ if (peek_any_identifier()) {
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
+ class_name_location = scanner()->location();
+ }
+ result = this->ParseClassLiteral(name, class_name_location,
+ is_strict_reserved_name,
+ class_token_position, CHECK_OK);
+ break;
+ }
+
case Token::MOD:
if (allow_natives_syntax() || extension_ != NULL) {
result = this->ParseV8Intrinsic(CHECK_OK);
@@ -1834,14 +1916,138 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
template <class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParsePropertyName(
+ bool* is_get, bool* is_set, bool* is_static, bool* ok) {
+ Token::Value next = peek();
+ switch (next) {
+ case Token::STRING:
+ Consume(Token::STRING);
+ return this->GetSymbol(scanner_);
+ case Token::NUMBER:
+ Consume(Token::NUMBER);
+ return this->GetNumberAsSymbol(scanner_);
+ case Token::STATIC:
+ *is_static = true;
+ // Fall through.
+ default:
+ return ParseIdentifierNameOrGetOrSet(is_get, is_set, ok);
+ }
+ UNREACHABLE();
+ return this->EmptyIdentifier();
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
+ Traits>::ParsePropertyDefinition(ObjectLiteralChecker* checker,
+ bool in_class, bool is_static, bool* ok) {
+ ExpressionT value = this->EmptyExpression();
+ bool is_get = false;
+ bool is_set = false;
+ bool name_is_static = false;
+ bool is_generator = allow_harmony_object_literals_ && Check(Token::MUL);
+
+ Token::Value name_token = peek();
+ int next_pos = peek_position();
+ IdentifierT name =
+ ParsePropertyName(&is_get, &is_set, &name_is_static,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ if (fni_ != NULL) this->PushLiteralName(fni_, name);
+
+ if (!in_class && !is_generator && peek() == Token::COLON) {
+ // PropertyDefinition : PropertyName ':' AssignmentExpression
+ checker->CheckProperty(name_token, kValueProperty,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ Consume(Token::COLON);
+ value = this->ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ } else if (is_generator ||
+ (allow_harmony_object_literals_ && peek() == Token::LPAREN)) {
+ // Concise Method
+
+ if (is_static && this->IsPrototype(name)) {
+ ReportMessageAt(scanner()->location(), "static_prototype");
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+ if (is_generator && in_class && !is_static && this->IsConstructor(name)) {
+ ReportMessageAt(scanner()->location(), "constructor_special_method");
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+
+ checker->CheckProperty(name_token, kValueProperty,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
+ : FunctionKind::kConciseMethod;
+
+ value = this->ParseFunctionLiteral(
+ name, scanner()->location(),
+ false, // reserved words are allowed here
+ kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+ FunctionLiteral::NORMAL_ARITY,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ } else if (in_class && name_is_static && !is_static) {
+ // static MethodDefinition
+ return ParsePropertyDefinition(checker, true, true, ok);
+
+ } else if (is_get || is_set) {
+ // Accessor
+ bool dont_care = false;
+ name_token = peek();
+ name = ParsePropertyName(&dont_care, &dont_care, &dont_care,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ // Validate the property.
+ if (is_static && this->IsPrototype(name)) {
+ ReportMessageAt(scanner()->location(), "static_prototype");
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ } else if (in_class && !is_static && this->IsConstructor(name)) {
+ // ES6, spec draft rev 27, treats static get constructor as an error too.
+ // https://bugs.ecmascript.org/show_bug.cgi?id=3223
+ // TODO(arv): Update when bug is resolved.
+ ReportMessageAt(scanner()->location(), "constructor_special_method");
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+ checker->CheckProperty(name_token,
+ is_get ? kGetterProperty : kSetterProperty,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
+ name, scanner()->location(),
+ false, // reserved words are allowed here
+ FunctionKind::kNormalFunction, RelocInfo::kNoPosition,
+ FunctionLiteral::ANONYMOUS_EXPRESSION,
+ is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ return factory()->NewObjectLiteralProperty(is_get, value, next_pos,
+ is_static);
+ } else {
+ Token::Value next = Next();
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+
+ uint32_t index;
+ LiteralT key = this->IsArrayIndex(name, &index)
+ ? factory()->NewNumberLiteral(index, next_pos)
+ : factory()->NewStringLiteral(name, next_pos);
+
+ return factory()->NewObjectLiteralProperty(key, value, is_static);
+}
+
+
+template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
bool* ok) {
// ObjectLiteral ::
- // '{' ((
- // ((IdentifierName | String | Number) ':' AssignmentExpression) |
- // (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // ) ',')* '}'
- // (Except that the trailing comma is not required.)
+ // '{' (PropertyDefinition (',' PropertyDefinition)* ','? )? '}'
int pos = peek_position();
typename Traits::Type::PropertyList properties =
@@ -1856,118 +2062,15 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
while (peek() != Token::RBRACE) {
if (fni_ != NULL) fni_->Enter();
- typename Traits::Type::Literal key = this->EmptyLiteral();
- Token::Value next = peek();
- int next_pos = peek_position();
-
- switch (next) {
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::LET:
- case Token::YIELD:
- case Token::IDENTIFIER: {
- bool is_getter = false;
- bool is_setter = false;
- IdentifierT id =
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if (fni_ != NULL) this->PushLiteralName(fni_, id);
-
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- if (next != i::Token::IDENTIFIER &&
- next != i::Token::FUTURE_RESERVED_WORD &&
- next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- next != i::Token::LET &&
- next != i::Token::YIELD &&
- next != i::Token::NUMBER &&
- next != i::Token::STRING &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return this->EmptyLiteral();
- }
- // Validate the property.
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(next, type, CHECK_OK);
- IdentifierT name = this->GetSymbol(scanner_);
- typename Traits::Type::FunctionLiteral value =
- this->ParseFunctionLiteral(
- name, scanner()->location(),
- false, // reserved words are allowed here
- false, // not a generator
- RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
- is_getter ? FunctionLiteral::GETTER_ARITY
- : FunctionLiteral::SETTER_ARITY,
- CHECK_OK);
- typename Traits::Type::ObjectLiteralProperty property =
- factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
- if (this->IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) {
- // Need {} because of the CHECK_OK macro.
- Expect(Token::COMMA, CHECK_OK);
- }
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
- }
- // Failed to parse as get/set property, so it's just a normal property
- // (which might be called "get" or "set" or something else).
- key = factory()->NewStringLiteral(id, next_pos);
- break;
- }
- case Token::STRING: {
- Consume(Token::STRING);
- IdentifierT string = this->GetSymbol(scanner_);
- if (fni_ != NULL) this->PushLiteralName(fni_, string);
- uint32_t index;
- if (this->IsArrayIndex(string, &index)) {
- key = factory()->NewNumberLiteral(index, next_pos);
- break;
- }
- key = factory()->NewStringLiteral(string, next_pos);
- break;
- }
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- key = this->ExpressionFromLiteral(Token::NUMBER, next_pos, scanner_,
- factory());
- break;
- }
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- IdentifierT string = this->GetSymbol(scanner_);
- key = factory()->NewStringLiteral(string, next_pos);
- } else {
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return this->EmptyLiteral();
- }
- }
-
- // Validate the property
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
-
- Expect(Token::COLON, CHECK_OK);
- ExpressionT value = this->ParseAssignmentExpression(true, CHECK_OK);
-
- typename Traits::Type::ObjectLiteralProperty property =
- factory()->NewObjectLiteralProperty(key, value);
+ const bool in_class = false;
+ const bool is_static = false;
+ ObjectLiteralPropertyT property =
+ this->ParsePropertyDefinition(&checker, in_class, is_static, CHECK_OK);
// Mark top-level object literals that contain function literals and
// pretenure the literal so it can be added as a constant function
// property. (Parser only.)
- this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, value,
+ this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, property,
&has_function);
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -2045,7 +2148,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
}
if (fni_ != NULL) fni_->Enter();
- ParserCheckpoint checkpoint(this);
+ typename Traits::Checkpoint checkpoint(this);
ExpressionT expression =
this->ParseConditionalExpression(accept_IN, CHECK_OK);
@@ -2109,9 +2212,9 @@ ParserBase<Traits>::ParseYieldExpression(bool* ok) {
ExpressionT generator_object =
factory()->NewVariableProxy(function_state_->generator_object_variable());
ExpressionT expression = Traits::EmptyExpression();
- Yield::Kind kind = Yield::SUSPEND;
+ Yield::Kind kind = Yield::kSuspend;
if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
- if (Check(Token::MUL)) kind = Yield::DELEGATING;
+ if (Check(Token::MUL)) kind = Yield::kDelegating;
switch (peek()) {
case Token::EOS:
case Token::SEMICOLON:
@@ -2123,23 +2226,23 @@ ParserBase<Traits>::ParseYieldExpression(bool* ok) {
// The above set of tokens is the complete set of tokens that can appear
// after an AssignmentExpression, and none of them can start an
// AssignmentExpression. This allows us to avoid looking for an RHS for
- // a Yield::SUSPEND operation, given only one look-ahead token.
- if (kind == Yield::SUSPEND)
+ // a Yield::kSuspend operation, given only one look-ahead token.
+ if (kind == Yield::kSuspend)
break;
- DCHECK(kind == Yield::DELEGATING);
+ DCHECK_EQ(Yield::kDelegating, kind);
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(false, CHECK_OK);
break;
}
}
- if (kind == Yield::DELEGATING) {
+ if (kind == Yield::kDelegating) {
// var iterator = subject[Symbol.iterator]();
expression = this->GetIterator(expression, factory());
}
typename Traits::Type::YieldExpression yield =
factory()->NewYield(generator_object, expression, kind, pos);
- if (kind == Yield::DELEGATING) {
+ if (kind == Yield::kDelegating) {
yield->set_index(function_state_->NextHandlerIndex());
}
return yield;
@@ -2383,7 +2486,12 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(bool* ok) {
if (peek() == Token::NEW) {
Consume(Token::NEW);
int new_pos = position();
- ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ ExpressionT result = this->EmptyExpression();
+ if (Check(Token::SUPER)) {
+ result = this->SuperReference(scope_, factory());
+ } else {
+ result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ }
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
typename Traits::Type::ExpressionList args =
@@ -2397,7 +2505,7 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(bool* ok) {
return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
new_pos);
}
- // No 'new' keyword.
+ // No 'new' or 'super' keyword.
return this->ParseMemberExpression(ok);
}
@@ -2406,7 +2514,7 @@ template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseMemberExpression(bool* ok) {
// MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
+ // (PrimaryExpression | FunctionLiteral | ClassLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments)*
// The '[' Expression ']' and '.' Identifier parts are parsed by
@@ -2418,7 +2526,7 @@ ParserBase<Traits>::ParseMemberExpression(bool* ok) {
if (peek() == Token::FUNCTION) {
Consume(Token::FUNCTION);
int function_token_position = position();
- bool is_generator = allow_generators() && Check(Token::MUL);
+ bool is_generator = Check(Token::MUL);
IdentifierT name = this->EmptyIdentifier();
bool is_strict_reserved_name = false;
Scanner::Location function_name_location = Scanner::Location::invalid();
@@ -2430,14 +2538,25 @@ ParserBase<Traits>::ParseMemberExpression(bool* ok) {
function_name_location = scanner()->location();
function_type = FunctionLiteral::NAMED_EXPRESSION;
}
- result = this->ParseFunctionLiteral(name,
- function_name_location,
- is_strict_reserved_name,
- is_generator,
- function_token_position,
- function_type,
- FunctionLiteral::NORMAL_ARITY,
- CHECK_OK);
+ result = this->ParseFunctionLiteral(
+ name, function_name_location, is_strict_reserved_name,
+ is_generator ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction,
+ function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
+ CHECK_OK);
+ } else if (peek() == Token::SUPER) {
+ int beg_pos = position();
+ Consume(Token::SUPER);
+ Token::Value next = peek();
+ if (next == Token::PERIOD || next == Token::LBRACK ||
+ next == Token::LPAREN) {
+ result = this->SuperReference(scope_, factory());
+ } else {
+ ReportMessageAt(Scanner::Location(beg_pos, position()),
+ "unexpected_super");
+ *ok = false;
+ return this->EmptyExpression();
+ }
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
@@ -2503,7 +2622,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
{
FunctionState function_state(&function_state_, &scope_, &scope, zone(),
- this->ast_value_factory());
+ this->ast_value_factory(), ast_node_id_gen_);
Scanner::Location dupe_error_loc = Scanner::Location::invalid();
num_parameters = Traits::DeclareArrowParametersFromExpression(
params_ast, scope_, &dupe_error_loc, ok);
@@ -2585,7 +2704,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
materialized_literal_count, expected_property_count, handler_count,
num_parameters, FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
- FunctionLiteral::kNotParenthesized, FunctionLiteral::kArrowFunction,
+ FunctionLiteral::kNotParenthesized, FunctionKind::kArrowFunction,
start_pos);
function_literal->set_function_token_position(start_pos);
@@ -2598,6 +2717,66 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
}
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseClassLiteral(
+ IdentifierT name, Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos, bool* ok) {
+ // All parts of a ClassDeclaration or a ClassExpression are strict code.
+ if (name_is_strict_reserved) {
+ ReportMessageAt(class_name_location, "unexpected_strict_reserved");
+ *ok = false;
+ return this->EmptyExpression();
+ }
+ if (this->IsEvalOrArguments(name)) {
+ ReportMessageAt(class_name_location, "strict_eval_arguments");
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ // TODO(arv): Implement scopes and name binding in class body only.
+ // TODO(arv): Maybe add CLASS_SCOPE?
+ typename Traits::Type::ScopePtr extends_scope =
+ this->NewScope(scope_, BLOCK_SCOPE);
+ FunctionState extends_function_state(
+ &function_state_, &scope_, &extends_scope, zone(),
+ this->ast_value_factory(), ast_node_id_gen_);
+ scope_->SetStrictMode(STRICT);
+ scope_->SetScopeName(name);
+
+ ExpressionT extends = this->EmptyExpression();
+ if (Check(Token::EXTENDS)) {
+ extends = this->ParseLeftHandSideExpression(CHECK_OK);
+ }
+
+ ObjectLiteralChecker checker(this, STRICT);
+ typename Traits::Type::PropertyList properties =
+ this->NewPropertyList(4, zone_);
+ FunctionLiteralT constructor = this->EmptyFunctionLiteral();
+
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ if (Check(Token::SEMICOLON)) continue;
+ if (fni_ != NULL) fni_->Enter();
+
+ const bool in_class = true;
+ const bool is_static = false;
+ ObjectLiteralPropertyT property =
+ this->ParsePropertyDefinition(&checker, in_class, is_static, CHECK_OK);
+
+ properties->Add(property, zone());
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ return this->ClassLiteral(name, extends, constructor, properties, pos,
+ factory());
+}
+
+
template <typename Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::CheckAndRewriteReferenceExpression(
@@ -2630,9 +2809,7 @@ ParserBase<Traits>::CheckAndRewriteReferenceExpression(
template <typename Traits>
void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
- Token::Value property,
- PropertyKind type,
- bool* ok) {
+ Token::Value property, PropertyKind type, bool* ok) {
int old;
if (property == Token::NUMBER) {
old = scanner()->FindNumber(&finder_, type);
@@ -2656,8 +2833,6 @@ void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
*ok = false;
}
}
-
-
} } // v8::internal
#endif // V8_PREPARSER_H
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 19b0290758..1ff2edd285 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -289,6 +289,21 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
+void PrettyPrinter::VisitClassLiteral(ClassLiteral* node) {
+ Print("(class ");
+ PrintLiteral(node->name(), false);
+ if (node->extends()) {
+ Print(" extends ");
+ Visit(node->extends());
+ }
+ Print(" { ");
+ for (int i = 0; i < node->properties()->length(); i++) {
+ PrintObjectLiteralProperty(node->properties()->at(i));
+ }
+ Print(" })");
+}
+
+
void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
Print("(");
PrintLiteral(node->name(), false);
@@ -323,16 +338,22 @@ void PrettyPrinter::VisitObjectLiteral(ObjectLiteral* node) {
Print("{ ");
for (int i = 0; i < node->properties()->length(); i++) {
if (i != 0) Print(",");
- ObjectLiteral::Property* property = node->properties()->at(i);
- Print(" ");
- Visit(property->key());
- Print(": ");
- Visit(property->value());
+ PrintObjectLiteralProperty(node->properties()->at(i));
}
Print(" }");
}
+void PrettyPrinter::PrintObjectLiteralProperty(
+ ObjectLiteralProperty* property) {
+ // TODO(arv): Better printing of methods etc.
+ Print(" ");
+ Visit(property->key());
+ Print(": ");
+ Visit(property->value());
+}
+
+
void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
Print("[ ");
for (int i = 0; i < node->values()->length(); i++) {
@@ -447,6 +468,11 @@ void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
}
+void PrettyPrinter::VisitSuperReference(SuperReference* node) {
+ Print("<super-reference>");
+}
+
+
const char* PrettyPrinter::Print(AstNode* node) {
Init();
Visit(node);
@@ -964,6 +990,12 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
+void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
+ IndentedScope indent(this, "CLASS LITERAL");
+ PrintLiteralIndented("NAME", node->name(), false);
+}
+
+
void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
IndentedScope indent(this, "NATIVE FUNC LITERAL");
PrintLiteralIndented("NAME", node->name(), false);
@@ -1145,6 +1177,11 @@ void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION");
}
+
+void AstPrinter::VisitSuperReference(SuperReference* node) {
+ IndentedScope indent(this, "SUPER-REFERENCE");
+}
+
#endif // DEBUG
} } // namespace v8::internal
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index de40aae03d..d300d9a478 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -52,6 +52,7 @@ class PrettyPrinter: public AstVisitor {
void PrintDeclarations(ZoneList<Declaration*>* declarations);
void PrintFunctionLiteral(FunctionLiteral* function);
void PrintCaseClause(CaseClause* clause);
+ void PrintObjectLiteralProperty(ObjectLiteralProperty* property);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index 2d8314a42f..37c10ec08e 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -146,7 +146,7 @@ var lastMicrotaskId = 0;
// For bootstrapper.
IsPromise = function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && HAS_PRIVATE(x, promiseStatus);
+ return IS_SPEC_OBJECT(x) && HAS_DEFINED_PRIVATE(x, promiseStatus);
}
PromiseCreate = function PromiseCreate() {
@@ -162,7 +162,7 @@ var lastMicrotaskId = 0;
// Check promiseDebug property to avoid duplicate event.
if (DEBUG_IS_ACTIVE &&
GET_PRIVATE(promise, promiseStatus) == 0 &&
- !HAS_PRIVATE(promise, promiseDebug)) {
+ !HAS_DEFINED_PRIVATE(promise, promiseDebug)) {
%DebugPromiseRejectEvent(promise, r);
}
PromiseDone(promise, -1, r, promiseOnReject)
@@ -325,11 +325,22 @@ var lastMicrotaskId = 0;
// Utility for debugger
+ function PromiseHasRejectHandlerRecursive(promise) {
+ var queue = GET_PRIVATE(promise, promiseOnReject);
+ if (IS_UNDEFINED(queue)) return false;
+ // Do a depth first search for a reject handler that's not
+ // the default PromiseIdRejectHandler.
+ for (var i = 0; i < queue.length; i += 2) {
+ if (queue[i] != PromiseIdRejectHandler) return true;
+ if (PromiseHasRejectHandlerRecursive(queue[i + 1].promise)) return true;
+ }
+ return false;
+ }
+
PromiseHasRejectHandler = function PromiseHasRejectHandler() {
// Mark promise as already having triggered a reject event.
SET_PRIVATE(this, promiseDebug, true);
- var queue = GET_PRIVATE(this, promiseOnReject);
- return !IS_UNDEFINED(queue) && queue.length > 0;
+ return PromiseHasRejectHandlerRecursive(this);
};
// -------------------------------------------------------------------
diff --git a/deps/v8/src/property-details-inl.h b/deps/v8/src/property-details-inl.h
index eaa596f9da..efb27b3191 100644
--- a/deps/v8/src/property-details-inl.h
+++ b/deps/v8/src/property-details-inl.h
@@ -13,18 +13,6 @@
namespace v8 {
namespace internal {
-inline bool Representation::CanContainDouble(double value) {
- if (IsDouble() || is_more_general_than(Representation::Double())) {
- return true;
- }
- if (IsInt32Double(value)) {
- if (IsInteger32()) return true;
- if (IsSmi()) return Smi::IsValid(static_cast<int32_t>(value));
- }
- return false;
-}
-
-
Representation Representation::FromType(Type* type) {
DisallowHeapAllocation no_allocation;
if (type->Is(Type::None())) return Representation::None();
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 7eb2e4ea9d..f75bcff049 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -46,16 +46,11 @@ class TypeInfo;
// A copy of this is in mirror-debugger.js.
enum PropertyType {
// Only in slow mode.
- NORMAL = 0,
+ NORMAL = 0,
// Only in fast mode.
- FIELD = 1,
- CONSTANT = 2,
- CALLBACKS = 3,
- // Only in lookup results, not in descriptors.
- HANDLER = 4,
- INTERCEPTOR = 5,
- // Only used as a marker in LookupResult.
- NONEXISTENT = 6
+ FIELD = 1,
+ CONSTANT = 2,
+ CALLBACKS = 3
};
@@ -124,8 +119,6 @@ class Representation {
return other.is_more_general_than(*this) || other.Equals(*this);
}
- bool CanContainDouble(double value);
-
Representation generalize(Representation other) {
if (other.fits_into(*this)) return *this;
if (other.is_more_general_than(*this)) return other;
@@ -262,28 +255,28 @@ class PropertyDetails BASE_EMBEDDED {
}
bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
- bool IsDontDelete() const { return (attributes() & DONT_DELETE) != 0; }
+ bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
bool IsDeleted() const { return DeletedField::decode(value_) != 0;}
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 3> {};
- class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+ class TypeField : public BitField<PropertyType, 0, 2> {};
+ class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
// Bit fields for normalized objects.
- class DeletedField: public BitField<uint32_t, 6, 1> {};
- class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
+ class DeletedField : public BitField<uint32_t, 5, 1> {};
+ class DictionaryStorageField : public BitField<uint32_t, 6, 24> {};
// Bit fields for fast objects.
- class RepresentationField: public BitField<uint32_t, 6, 4> {};
- class DescriptorPointer: public BitField<uint32_t, 10,
- kDescriptorIndexBitCount> {}; // NOLINT
- class FieldIndexField: public BitField<uint32_t,
- 10 + kDescriptorIndexBitCount,
- kDescriptorIndexBitCount> {}; // NOLINT
+ class RepresentationField : public BitField<uint32_t, 5, 4> {};
+ class DescriptorPointer
+ : public BitField<uint32_t, 9, kDescriptorIndexBitCount> {}; // NOLINT
+ class FieldIndexField
+ : public BitField<uint32_t, 9 + kDescriptorIndexBitCount,
+ kDescriptorIndexBitCount> {}; // NOLINT
// All bits for fast objects must fix in a smi.
- STATIC_ASSERT(10 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
+ STATIC_ASSERT(9 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
static const int kInitialIndex = 1;
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index f1378ecdf9..f0ff95c0fe 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -13,8 +13,8 @@ namespace internal {
void LookupResult::Iterate(ObjectVisitor* visitor) {
LookupResult* current = this; // Could be NULL.
while (current != NULL) {
- visitor->VisitPointer(BitCast<Object**>(&current->holder_));
- visitor->VisitPointer(BitCast<Object**>(&current->transition_));
+ visitor->VisitPointer(bit_cast<Object**>(&current->holder_));
+ visitor->VisitPointer(bit_cast<Object**>(&current->transition_));
current = current->next_;
}
}
@@ -24,35 +24,9 @@ OStream& operator<<(OStream& os, const LookupResult& r) {
if (!r.IsFound()) return os << "Not Found\n";
os << "LookupResult:\n";
- os << " -cacheable = " << (r.IsCacheable() ? "true" : "false") << "\n";
- os << " -attributes = " << hex << r.GetAttributes() << dec << "\n";
if (r.IsTransition()) {
os << " -transition target:\n" << Brief(r.GetTransitionTarget()) << "\n";
}
- switch (r.type()) {
- case NORMAL:
- return os << " -type = normal\n"
- << " -entry = " << r.GetDictionaryEntry() << "\n";
- case CONSTANT:
- return os << " -type = constant\n"
- << " -value:\n" << Brief(r.GetConstant()) << "\n";
- case FIELD:
- os << " -type = field\n"
- << " -index = " << r.GetFieldIndex().property_index() << "\n"
- << " -field type:";
- r.GetFieldType()->PrintTo(os);
- return os << "\n";
- case CALLBACKS:
- return os << " -type = call backs\n"
- << " -callback object:\n" << Brief(r.GetCallbackObject());
- case HANDLER:
- return os << " -type = lookup proxy\n";
- case INTERCEPTOR:
- return os << " -type = lookup interceptor\n";
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
return os;
}
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 272a0a5ab9..779d9fcc6c 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -73,7 +73,7 @@ class Descriptor BASE_EMBEDDED {
OStream& operator<<(OStream& os, const Descriptor& d);
-class FieldDescriptor V8_FINAL : public Descriptor {
+class FieldDescriptor FINAL : public Descriptor {
public:
FieldDescriptor(Handle<Name> key,
int field_index,
@@ -91,7 +91,7 @@ class FieldDescriptor V8_FINAL : public Descriptor {
};
-class ConstantDescriptor V8_FINAL : public Descriptor {
+class ConstantDescriptor FINAL : public Descriptor {
public:
ConstantDescriptor(Handle<Name> key,
Handle<Object> value,
@@ -101,7 +101,7 @@ class ConstantDescriptor V8_FINAL : public Descriptor {
};
-class CallbacksDescriptor V8_FINAL : public Descriptor {
+class CallbacksDescriptor FINAL : public Descriptor {
public:
CallbacksDescriptor(Handle<Name> key,
Handle<Object> foreign,
@@ -111,7 +111,7 @@ class CallbacksDescriptor V8_FINAL : public Descriptor {
};
-class LookupResult V8_FINAL BASE_EMBEDDED {
+class LookupResult FINAL BASE_EMBEDDED {
public:
explicit LookupResult(Isolate* isolate)
: isolate_(isolate),
@@ -119,8 +119,7 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
lookup_type_(NOT_FOUND),
holder_(NULL),
transition_(NULL),
- cacheable_(true),
- details_(NONE, NONEXISTENT, Representation::None()) {
+ details_(NONE, NORMAL, Representation::None()) {
isolate->set_top_lookup_result(this);
}
@@ -139,28 +138,6 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
number_ = number;
}
- bool CanHoldValue(Handle<Object> value) const {
- switch (type()) {
- case NORMAL:
- return true;
- case FIELD:
- return value->FitsRepresentation(representation()) &&
- GetFieldType()->NowContains(value);
- case CONSTANT:
- DCHECK(GetConstant() != *value ||
- value->FitsRepresentation(representation()));
- return GetConstant() == *value;
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- return true;
- case NONEXISTENT:
- UNREACHABLE();
- }
- UNREACHABLE();
- return true;
- }
-
void TransitionResult(JSObject* holder, Map* target) {
lookup_type_ = TRANSITION_TYPE;
number_ = target->LastAdded();
@@ -169,72 +146,18 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
transition_ = target;
}
- void DictionaryResult(JSObject* holder, int entry) {
- lookup_type_ = DICTIONARY_TYPE;
- holder_ = holder;
- transition_ = NULL;
- details_ = holder->property_dictionary()->DetailsAt(entry);
- number_ = entry;
- }
-
- void HandlerResult(JSProxy* proxy) {
- lookup_type_ = HANDLER_TYPE;
- holder_ = proxy;
- transition_ = NULL;
- details_ = PropertyDetails(NONE, HANDLER, Representation::Tagged());
- cacheable_ = false;
- }
-
- void InterceptorResult(JSObject* holder) {
- lookup_type_ = INTERCEPTOR_TYPE;
- holder_ = holder;
- transition_ = NULL;
- details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::Tagged());
- }
-
void NotFound() {
lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
+ details_ = PropertyDetails(NONE, NORMAL, Representation::None());
holder_ = NULL;
transition_ = NULL;
}
- JSObject* holder() const {
- DCHECK(IsFound());
- return JSObject::cast(holder_);
- }
-
- JSProxy* proxy() const {
- DCHECK(IsHandler());
- return JSProxy::cast(holder_);
- }
-
- PropertyType type() const {
- DCHECK(IsFound());
- return details_.type();
- }
-
Representation representation() const {
DCHECK(IsFound());
- DCHECK(details_.type() != NONEXISTENT);
return details_.representation();
}
- PropertyAttributes GetAttributes() const {
- DCHECK(IsFound());
- DCHECK(details_.type() != NONEXISTENT);
- return details_.attributes();
- }
-
- PropertyDetails GetPropertyDetails() const {
- return details_;
- }
-
- bool IsFastPropertyType() const {
- DCHECK(IsFound());
- return IsTransition() || type() != NORMAL;
- }
-
// Property callbacks does not include transitions to callbacks.
bool IsPropertyCallbacks() const {
DCHECK(!(details_.type() == CALLBACKS && !IsFound()));
@@ -243,114 +166,28 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
bool IsReadOnly() const {
DCHECK(IsFound());
- DCHECK(details_.type() != NONEXISTENT);
return details_.IsReadOnly();
}
bool IsField() const {
DCHECK(!(details_.type() == FIELD && !IsFound()));
- return IsDescriptorOrDictionary() && type() == FIELD;
- }
-
- bool IsNormal() const {
- DCHECK(!(details_.type() == NORMAL && !IsFound()));
- return IsDescriptorOrDictionary() && type() == NORMAL;
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == FIELD;
}
bool IsConstant() const {
DCHECK(!(details_.type() == CONSTANT && !IsFound()));
- return IsDescriptorOrDictionary() && type() == CONSTANT;
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == CONSTANT;
}
- bool IsConstantFunction() const {
- return IsConstant() && GetConstant()->IsJSFunction();
- }
-
- bool IsDontDelete() const { return details_.IsDontDelete(); }
- bool IsDontEnum() const { return details_.IsDontEnum(); }
+ bool IsConfigurable() const { return details_.IsConfigurable(); }
bool IsFound() const { return lookup_type_ != NOT_FOUND; }
- bool IsDescriptorOrDictionary() const {
- return lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE;
- }
bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
// Is the result is a property excluding transitions and the null descriptor?
bool IsProperty() const {
return IsFound() && !IsTransition();
}
- bool IsDataProperty() const {
- switch (lookup_type_) {
- case NOT_FOUND:
- case TRANSITION_TYPE:
- case HANDLER_TYPE:
- case INTERCEPTOR_TYPE:
- return false;
-
- case DESCRIPTOR_TYPE:
- case DICTIONARY_TYPE:
- switch (type()) {
- case FIELD:
- case NORMAL:
- case CONSTANT:
- return true;
- case CALLBACKS: {
- Object* callback = GetCallbackObject();
- DCHECK(!callback->IsForeign());
- return callback->IsAccessorInfo();
- }
- case HANDLER:
- case INTERCEPTOR:
- case NONEXISTENT:
- UNREACHABLE();
- return false;
- }
- }
- UNREACHABLE();
- return false;
- }
-
- bool IsCacheable() const { return cacheable_; }
- void DisallowCaching() { cacheable_ = false; }
-
- Object* GetLazyValue() const {
- switch (lookup_type_) {
- case NOT_FOUND:
- case TRANSITION_TYPE:
- case HANDLER_TYPE:
- case INTERCEPTOR_TYPE:
- return isolate()->heap()->the_hole_value();
-
- case DESCRIPTOR_TYPE:
- case DICTIONARY_TYPE:
- switch (type()) {
- case FIELD:
- return holder()->RawFastPropertyAt(GetFieldIndex());
- case NORMAL: {
- Object* value = holder()->property_dictionary()->ValueAt(
- GetDictionaryEntry());
- if (holder()->IsGlobalObject()) {
- value = PropertyCell::cast(value)->value();
- }
- return value;
- }
- case CONSTANT:
- return GetConstant();
- case CALLBACKS:
- return isolate()->heap()->the_hole_value();
- case HANDLER:
- case INTERCEPTOR:
- case NONEXISTENT:
- UNREACHABLE();
- return NULL;
- }
- }
- UNREACHABLE();
- return NULL;
- }
-
Map* GetTransitionTarget() const {
DCHECK(IsTransition());
return transition_;
@@ -360,66 +197,15 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
return IsTransition() && details_.type() == FIELD;
}
- bool IsTransitionToConstant() const {
- return IsTransition() && details_.type() == CONSTANT;
- }
-
- int GetDescriptorIndex() const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE);
- return number_;
- }
-
- FieldIndex GetFieldIndex() const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- return FieldIndex::ForLookupResult(this);
- }
-
int GetLocalFieldIndexFromMap(Map* map) const {
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
- int GetDictionaryEntry() const {
- DCHECK(lookup_type_ == DICTIONARY_TYPE);
- return number_;
- }
-
- JSFunction* GetConstantFunction() const {
- DCHECK(type() == CONSTANT);
- return JSFunction::cast(GetValue());
- }
-
Object* GetConstantFromMap(Map* map) const {
- DCHECK(type() == CONSTANT);
+ DCHECK(details_.type() == CONSTANT);
return GetValueFromMap(map);
}
- JSFunction* GetConstantFunctionFromMap(Map* map) const {
- return JSFunction::cast(GetConstantFromMap(map));
- }
-
- Object* GetConstant() const {
- DCHECK(type() == CONSTANT);
- return GetValue();
- }
-
- Object* GetCallbackObject() const {
- DCHECK(!IsTransition());
- DCHECK(type() == CALLBACKS);
- return GetValue();
- }
-
- Object* GetValue() const {
- if (lookup_type_ == DESCRIPTOR_TYPE) {
- return GetValueFromMap(holder()->map());
- } else if (lookup_type_ == TRANSITION_TYPE) {
- return GetValueFromMap(transition_);
- }
- // In the dictionary case, the data is held in the value field.
- DCHECK(lookup_type_ == DICTIONARY_TYPE);
- return holder()->GetNormalizedProperty(this);
- }
-
Object* GetValueFromMap(Map* map) const {
DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
lookup_type_ == TRANSITION_TYPE);
@@ -434,26 +220,12 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
return map->instance_descriptors()->GetFieldIndex(number_);
}
- HeapType* GetFieldType() const {
- DCHECK(type() == FIELD);
- if (lookup_type_ == DESCRIPTOR_TYPE) {
- return GetFieldTypeFromMap(holder()->map());
- }
- DCHECK(lookup_type_ == TRANSITION_TYPE);
- return GetFieldTypeFromMap(transition_);
- }
-
HeapType* GetFieldTypeFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
+ DCHECK_NE(NOT_FOUND, lookup_type_);
DCHECK(number_ < map->NumberOfOwnDescriptors());
return map->instance_descriptors()->GetFieldType(number_);
}
- Map* GetFieldOwner() const {
- return GetFieldOwnerFromMap(holder()->map());
- }
-
Map* GetFieldOwnerFromMap(Map* map) const {
DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
lookup_type_ == TRANSITION_TYPE);
@@ -461,12 +233,6 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
return map->FindFieldOwner(number_);
}
- bool ReceiverIsHolder(Handle<Object> receiver) {
- if (*receiver == holder()) return true;
- if (lookup_type_ == TRANSITION_TYPE) return true;
- return false;
- }
-
void Iterate(ObjectVisitor* visitor);
private:
@@ -474,19 +240,11 @@ class LookupResult V8_FINAL BASE_EMBEDDED {
LookupResult* next_;
// Where did we find the result;
- enum {
- NOT_FOUND,
- DESCRIPTOR_TYPE,
- TRANSITION_TYPE,
- DICTIONARY_TYPE,
- HANDLER_TYPE,
- INTERCEPTOR_TYPE
- } lookup_type_;
+ enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
JSReceiver* holder_;
Map* transition_;
int number_;
- bool cacheable_;
PropertyDetails details_;
};
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index cdfb46ad15..b192c22b6f 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -31,6 +31,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual ~RegExpMacroAssemblerIrregexp();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
+ virtual bool CanReadUnaligned() { return false; }
virtual void Bind(Label* label);
virtual void AdvanceCurrentPosition(int by); // Signed cp change.
virtual void PopCurrentPosition();
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 13c2a6a32f..52df648d9a 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -24,15 +24,6 @@ RegExpMacroAssembler::~RegExpMacroAssembler() {
}
-bool RegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return true;
-#else
- return false;
-#endif
-}
-
-
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
@@ -58,7 +49,7 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
if (subject->IsOneByteRepresentation()) {
const byte* address;
if (StringShape(subject).IsExternal()) {
- const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars();
+ const uint8_t* data = ExternalOneByteString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
} else {
DCHECK(subject->IsSeqOneByteString());
@@ -110,11 +101,11 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
subject_ptr = slice->parent();
slice_offset = slice->offset();
}
- // Ensure that an underlying string has the same ASCII-ness.
- bool is_ascii = subject_ptr->IsOneByteRepresentation();
+ // Ensure that an underlying string has the same representation.
+ bool is_one_byte = subject_ptr->IsOneByteRepresentation();
DCHECK(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
- int char_size_shift = is_ascii ? 0 : 1;
+ int char_size_shift = is_one_byte ? 0 : 1;
const byte* input_start =
StringCharacterPosition(subject_ptr, start_offset + slice_offset);
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index f0cfc465fc..f72cc4d42d 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -48,7 +48,7 @@ class RegExpMacroAssembler {
// kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
// at least once for every stack_limit() pushes that are executed.
virtual int stack_limit_slack() = 0;
- virtual bool CanReadUnaligned();
+ virtual bool CanReadUnaligned() = 0;
virtual void AdvanceCurrentPosition(int by) = 0; // Signed cp change.
virtual void AdvanceRegister(int reg, int by) = 0; // r[reg] += by.
// Continues execution from the position pushed on the top of the backtrack
@@ -171,7 +171,7 @@ class RegExpMacroAssembler {
class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
public:
// Type of input string to generate code for.
- enum Mode { ASCII = 1, UC16 = 2 };
+ enum Mode { LATIN1 = 1, UC16 = 2 };
// Result of calling generated native RegExp code.
// RETRY: Something significant changed during execution, and the matching
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index d7883fb693..0f3dbb630e 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -22,6 +22,8 @@ function DoConstructRegExp(object, pattern, flags) {
flags = (pattern.global ? 'g' : '')
+ (pattern.ignoreCase ? 'i' : '')
+ (pattern.multiline ? 'm' : '');
+ if (harmony_regexps)
+ flags += (pattern.sticky ? 'y' : '');
pattern = pattern.source;
}
@@ -31,6 +33,7 @@ function DoConstructRegExp(object, pattern, flags) {
var global = false;
var ignoreCase = false;
var multiline = false;
+ var sticky = false;
for (var i = 0; i < flags.length; i++) {
var c = %_CallFunction(flags, i, StringCharAt);
switch (c) {
@@ -52,12 +55,18 @@ function DoConstructRegExp(object, pattern, flags) {
}
multiline = true;
break;
+ case 'y':
+ if (!harmony_regexps || sticky) {
+ throw MakeSyntaxError("invalid_regexp_flags", [flags]);
+ }
+ sticky = true;
+ break;
default:
throw MakeSyntaxError("invalid_regexp_flags", [flags]);
}
}
- %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline);
+ %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline, sticky);
// Call internal function to compile the pattern.
%RegExpCompile(object, pattern, flags);
@@ -159,8 +168,8 @@ function RegExpExec(string) {
// algorithm, step 5) even if the value is discarded for non-global RegExps.
var i = TO_INTEGER(lastIndex);
- var global = this.global;
- if (global) {
+ var updateLastIndex = this.global || (harmony_regexps && this.sticky);
+ if (updateLastIndex) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
return null;
@@ -179,7 +188,7 @@ function RegExpExec(string) {
// Successful match.
lastMatchInfoOverride = null;
- if (global) {
+ if (updateLastIndex) {
this.lastIndex = lastMatchInfo[CAPTURE1];
}
RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
@@ -207,7 +216,7 @@ function RegExpTest(string) {
// algorithm, step 5) even if the value is discarded for non-global RegExps.
var i = TO_INTEGER(lastIndex);
- if (this.global) {
+ if (this.global || (harmony_regexps && this.sticky)) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
return false;
@@ -222,12 +231,13 @@ function RegExpTest(string) {
this.lastIndex = lastMatchInfo[CAPTURE1];
return true;
} else {
- // Non-global regexp.
- // Remove irrelevant preceeding '.*' in a non-global test regexp.
- // The expression checks whether this.source starts with '.*' and
- // that the third char is not a '?'.
+ // Non-global, non-sticky regexp.
+ // Remove irrelevant preceeding '.*' in a test regexp. The expression
+ // checks whether this.source starts with '.*' and that the third char is
+ // not a '?'. But see https://code.google.com/p/v8/issues/detail?id=3560
var regexp = this;
- if (%_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
+ if (regexp.source.length >= 3 &&
+ %_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
%_StringCharCodeAt(regexp.source, 1) == 42 && // '*'
%_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
regexp = TrimRegExp(regexp);
@@ -264,6 +274,7 @@ function RegExpToString() {
if (this.global) result += 'g';
if (this.ignoreCase) result += 'i';
if (this.multiline) result += 'm';
+ if (harmony_regexps && this.sticky) result += 'y';
return result;
}
@@ -394,7 +405,7 @@ function SetUpRegExp() {
// The length of compile is 1 in SpiderMonkey.
%FunctionSetLength($RegExp.prototype.compile, 1);
- // The properties input, $input, and $_ are aliases for each other. When this
+ // The properties `input` and `$_` are aliases for each other. When this
// value is set the value it is set to is coerced to a string.
// Getter and setter for the input.
var RegExpGetInput = function() {
@@ -410,8 +421,6 @@ function SetUpRegExp() {
RegExpSetInput, DONT_DELETE);
%DefineAccessorPropertyUnchecked($RegExp, '$_', RegExpGetInput,
RegExpSetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, '$input', RegExpGetInput,
- RegExpSetInput, DONT_ENUM | DONT_DELETE);
// The properties multiline and $* are aliases for each other. When this
// value is set in SpiderMonkey, the value it is set to is coerced to a
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 169dce9a41..867229a650 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -15,14 +15,14 @@ namespace internal {
class Processor: public AstVisitor {
public:
- Processor(Variable* result, Zone* zone)
+ Processor(Variable* result, Zone* zone, AstNode::IdGen* ast_node_id_gen)
: result_(result),
result_assigned_(false),
is_set_(false),
in_try_(false),
// Passing a null AstValueFactory is fine, because Processor doesn't
// need to create strings or literals.
- factory_(zone, NULL) {
+ factory_(zone, NULL, ast_node_id_gen) {
InitializeAstVisitor(zone);
}
@@ -240,7 +240,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
scope->NewTemporary(info->ast_value_factory()->dot_result_string());
// The name string must be internalized at this point.
DCHECK(!result->name().is_null());
- Processor processor(result, info->zone());
+ Processor processor(result, info->zone(), info->ast_node_id_gen());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
deleted file mode 100644
index 4a78edb897..0000000000
--- a/deps/v8/src/runtime.h
+++ /dev/null
@@ -1,913 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_RUNTIME_H_
-#define V8_RUNTIME_H_
-
-#include "src/allocation.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// The interface to C++ runtime functions.
-
-// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST_ALWAYS defines runtime calls available in both
-// release and debug mode.
-// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
-
-// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
-// MSVC Intellisense to crash. It was broken into two macros to work around
-// this problem. Please avoid large recursive macros whenever possible.
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- /* Property access */ \
- F(GetProperty, 2, 1) \
- F(KeyedGetProperty, 2, 1) \
- F(DeleteProperty, 3, 1) \
- F(HasOwnProperty, 2, 1) \
- F(HasProperty, 2, 1) \
- F(HasElement, 2, 1) \
- F(IsPropertyEnumerable, 2, 1) \
- F(GetPropertyNames, 1, 1) \
- F(GetPropertyNamesFast, 1, 1) \
- F(GetOwnPropertyNames, 2, 1) \
- F(GetOwnElementNames, 1, 1) \
- F(GetInterceptorInfo, 1, 1) \
- F(GetNamedInterceptorPropertyNames, 1, 1) \
- F(GetIndexedInterceptorElementNames, 1, 1) \
- F(GetArgumentsProperty, 1, 1) \
- F(ToFastProperties, 1, 1) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 0, 1) \
- F(IsSloppyModeFunction, 1, 1) \
- F(GetDefaultReceiver, 1, 1) \
- \
- F(GetPrototype, 1, 1) \
- F(SetPrototype, 2, 1) \
- F(InternalSetPrototype, 2, 1) \
- F(IsInPrototypeChain, 2, 1) \
- \
- F(GetOwnProperty, 2, 1) \
- \
- F(IsExtensible, 1, 1) \
- F(PreventExtensions, 1, 1) \
- \
- /* Utilities */ \
- F(CheckIsBootstrapping, 0, 1) \
- F(GetRootNaN, 0, 1) \
- F(Call, -1 /* >= 2 */, 1) \
- F(Apply, 5, 1) \
- F(GetFunctionDelegate, 1, 1) \
- F(GetConstructorDelegate, 1, 1) \
- F(DeoptimizeFunction, 1, 1) \
- F(ClearFunctionTypeFeedback, 1, 1) \
- F(RunningInSimulator, 0, 1) \
- F(IsConcurrentRecompilationSupported, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(NeverOptimizeFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
- F(IsOptimized, 0, 1) /* TODO(turbofan): Only temporary */ \
- F(GetOptimizationCount, 1, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(SetNativeFlag, 1, 1) \
- F(SetInlineBuiltinFlag, 1, 1) \
- F(StoreArrayLiteralElement, 5, 1) \
- F(DebugPrepareStepInIfStepping, 1, 1) \
- F(DebugPushPromise, 1, 1) \
- F(DebugPopPromise, 0, 1) \
- F(DebugPromiseEvent, 1, 1) \
- F(DebugPromiseRejectEvent, 2, 1) \
- F(DebugAsyncTaskEvent, 1, 1) \
- F(FlattenString, 1, 1) \
- F(LoadMutableDouble, 2, 1) \
- F(TryMigrateInstance, 1, 1) \
- F(NotifyContextDisposed, 0, 1) \
- \
- /* Array join support */ \
- F(PushIfAbsent, 2, 1) \
- F(ArrayConcat, 1, 1) \
- \
- /* Conversions */ \
- F(ToBool, 1, 1) \
- F(Typeof, 1, 1) \
- \
- F(Booleanize, 2, 1) /* TODO(turbofan): Only temporary */ \
- \
- F(StringToNumber, 1, 1) \
- F(StringParseInt, 2, 1) \
- F(StringParseFloat, 1, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
- F(StringSplit, 3, 1) \
- F(CharFromCode, 1, 1) \
- F(URIEscape, 1, 1) \
- F(URIUnescape, 1, 1) \
- \
- F(NumberToInteger, 1, 1) \
- F(NumberToIntegerMapMinusZero, 1, 1) \
- F(NumberToJSUint32, 1, 1) \
- F(NumberToJSInt32, 1, 1) \
- \
- /* Arithmetic operations */ \
- F(NumberAdd, 2, 1) \
- F(NumberSub, 2, 1) \
- F(NumberMul, 2, 1) \
- F(NumberDiv, 2, 1) \
- F(NumberMod, 2, 1) \
- F(NumberUnaryMinus, 1, 1) \
- F(NumberImul, 2, 1) \
- \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
- \
- /* Bit operations */ \
- F(NumberOr, 2, 1) \
- F(NumberAnd, 2, 1) \
- F(NumberXor, 2, 1) \
- \
- F(NumberShl, 2, 1) \
- F(NumberShr, 2, 1) \
- F(NumberSar, 2, 1) \
- \
- /* Comparisons */ \
- F(NumberEquals, 2, 1) \
- F(StringEquals, 2, 1) \
- \
- F(NumberCompare, 3, 1) \
- F(SmiLexicographicCompare, 2, 1) \
- \
- /* Math */ \
- F(MathAcos, 1, 1) \
- F(MathAsin, 1, 1) \
- F(MathAtan, 1, 1) \
- F(MathFloorRT, 1, 1) \
- F(MathAtan2, 2, 1) \
- F(MathExpRT, 1, 1) \
- F(RoundNumber, 1, 1) \
- F(MathFround, 1, 1) \
- F(RemPiO2, 1, 1) \
- \
- /* Regular expressions */ \
- F(RegExpCompile, 3, 1) \
- F(RegExpExecMultiple, 4, 1) \
- F(RegExpInitializeObject, 5, 1) \
- \
- /* JSON */ \
- F(ParseJson, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- \
- /* Strings */ \
- F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
- F(StringReplaceGlobalRegExpWithString, 4, 1) \
- F(StringReplaceOneCharWithString, 3, 1) \
- F(StringMatch, 3, 1) \
- F(StringTrim, 3, 1) \
- F(StringToArray, 2, 1) \
- F(NewStringWrapper, 1, 1) \
- F(NewString, 2, 1) \
- F(TruncateString, 2, 1) \
- \
- /* Numbers */ \
- F(NumberToRadixString, 2, 1) \
- F(NumberToFixed, 2, 1) \
- F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1) \
- F(IsValidSmi, 1, 1)
-
-
-#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- /* Reflection */ \
- F(FunctionSetInstanceClassName, 2, 1) \
- F(FunctionSetLength, 2, 1) \
- F(FunctionSetPrototype, 2, 1) \
- F(FunctionGetName, 1, 1) \
- F(FunctionSetName, 2, 1) \
- F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionIsGenerator, 1, 1) \
- F(FunctionIsArrow, 1, 1) \
- F(FunctionBindArguments, 4, 1) \
- F(BoundFunctionGetBindings, 1, 1) \
- F(FunctionRemovePrototype, 1, 1) \
- F(FunctionGetSourceCode, 1, 1) \
- F(FunctionGetScript, 1, 1) \
- F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetPositionForOffset, 2, 1) \
- F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionIsBuiltin, 1, 1) \
- F(GetScript, 1, 1) \
- F(CollectStackTrace, 2, 1) \
- F(GetV8Version, 0, 1) \
- \
- F(SetCode, 2, 1) \
- \
- F(CreateApiFunction, 2, 1) \
- F(IsTemplate, 1, 1) \
- F(GetTemplateField, 2, 1) \
- F(DisableAccessChecks, 1, 1) \
- F(EnableAccessChecks, 1, 1) \
- \
- /* Dates */ \
- F(DateCurrentTime, 0, 1) \
- F(DateParseString, 2, 1) \
- F(DateLocalTimezone, 1, 1) \
- F(DateToUTC, 1, 1) \
- F(DateMakeDay, 2, 1) \
- F(DateSetValue, 3, 1) \
- F(DateCacheVersion, 0, 1) \
- \
- /* Globals */ \
- F(CompileString, 2, 1) \
- \
- /* Eval */ \
- F(GlobalProxy, 1, 1) \
- F(IsAttachedGlobal, 1, 1) \
- \
- F(AddNamedProperty, 4, 1) \
- F(AddPropertyForTemplate, 4, 1) \
- F(SetProperty, 4, 1) \
- F(DefineApiAccessorProperty, 5, 1) \
- F(DefineDataPropertyUnchecked, 4, 1) \
- F(DefineAccessorPropertyUnchecked, 5, 1) \
- F(GetDataProperty, 2, 1) \
- F(SetHiddenProperty, 3, 1) \
- \
- /* Arrays */ \
- F(RemoveArrayHoles, 2, 1) \
- F(GetArrayKeys, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(NormalizeElements, 1, 1) \
- \
- /* Getters and Setters */ \
- F(LookupAccessor, 3, 1) \
- \
- /* ES5 */ \
- F(ObjectFreeze, 1, 1) \
- \
- /* Harmony modules */ \
- F(IsJSModule, 1, 1) \
- \
- /* Harmony symbols */ \
- F(CreateSymbol, 1, 1) \
- F(CreatePrivateSymbol, 1, 1) \
- F(CreateGlobalPrivateSymbol, 1, 1) \
- F(CreatePrivateOwnSymbol, 1, 1) \
- F(NewSymbolWrapper, 1, 1) \
- F(SymbolDescription, 1, 1) \
- F(SymbolRegistry, 0, 1) \
- F(SymbolIsPrivate, 1, 1) \
- \
- /* Harmony proxies */ \
- F(CreateJSProxy, 2, 1) \
- F(CreateJSFunctionProxy, 4, 1) \
- F(IsJSProxy, 1, 1) \
- F(IsJSFunctionProxy, 1, 1) \
- F(GetHandler, 1, 1) \
- F(GetCallTrap, 1, 1) \
- F(GetConstructTrap, 1, 1) \
- F(Fix, 1, 1) \
- \
- /* Harmony sets */ \
- F(SetInitialize, 1, 1) \
- F(SetAdd, 2, 1) \
- F(SetHas, 2, 1) \
- F(SetDelete, 2, 1) \
- F(SetClear, 1, 1) \
- F(SetGetSize, 1, 1) \
- \
- F(SetIteratorInitialize, 3, 1) \
- F(SetIteratorNext, 2, 1) \
- \
- /* Harmony maps */ \
- F(MapInitialize, 1, 1) \
- F(MapGet, 2, 1) \
- F(MapHas, 2, 1) \
- F(MapDelete, 2, 1) \
- F(MapClear, 1, 1) \
- F(MapSet, 3, 1) \
- F(MapGetSize, 1, 1) \
- \
- F(MapIteratorInitialize, 3, 1) \
- F(MapIteratorNext, 2, 1) \
- \
- /* Harmony weak maps and sets */ \
- F(WeakCollectionInitialize, 1, 1) \
- F(WeakCollectionGet, 2, 1) \
- F(WeakCollectionHas, 2, 1) \
- F(WeakCollectionDelete, 2, 1) \
- F(WeakCollectionSet, 3, 1) \
- \
- F(GetWeakMapEntries, 1, 1) \
- F(GetWeakSetValues, 1, 1) \
- \
- /* Harmony events */ \
- F(EnqueueMicrotask, 1, 1) \
- F(RunMicrotasks, 0, 1) \
- \
- /* Harmony observe */ \
- F(IsObserved, 1, 1) \
- F(SetIsObserved, 1, 1) \
- F(GetObservationState, 0, 1) \
- F(ObservationWeakMapCreate, 0, 1) \
- F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \
- F(ObjectWasCreatedInCurrentOrigin, 1, 1) \
- F(GetObjectContextObjectObserve, 1, 1) \
- F(GetObjectContextObjectGetNotifier, 1, 1) \
- F(GetObjectContextNotifierPerformChange, 1, 1) \
- \
- /* Harmony typed arrays */ \
- F(ArrayBufferInitialize, 2, 1) \
- F(ArrayBufferSliceImpl, 3, 1) \
- F(ArrayBufferIsView, 1, 1) \
- F(ArrayBufferNeuter, 1, 1) \
- \
- F(TypedArrayInitializeFromArrayLike, 4, 1) \
- F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArraySetFastCases, 3, 1) \
- \
- F(DataViewGetBuffer, 1, 1) \
- F(DataViewGetInt8, 3, 1) \
- F(DataViewGetUint8, 3, 1) \
- F(DataViewGetInt16, 3, 1) \
- F(DataViewGetUint16, 3, 1) \
- F(DataViewGetInt32, 3, 1) \
- F(DataViewGetUint32, 3, 1) \
- F(DataViewGetFloat32, 3, 1) \
- F(DataViewGetFloat64, 3, 1) \
- \
- F(DataViewSetInt8, 4, 1) \
- F(DataViewSetUint8, 4, 1) \
- F(DataViewSetInt16, 4, 1) \
- F(DataViewSetUint16, 4, 1) \
- F(DataViewSetInt32, 4, 1) \
- F(DataViewSetUint32, 4, 1) \
- F(DataViewSetFloat32, 4, 1) \
- F(DataViewSetFloat64, 4, 1) \
- \
- /* Statements */ \
- F(NewObjectFromBound, 1, 1) \
- \
- /* Declarations and initialization */ \
- F(InitializeVarGlobal, 3, 1) \
- F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- \
- /* Debugging */ \
- F(DebugPrint, 1, 1) \
- F(GlobalPrint, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(Abort, 1, 1) \
- F(AbortJS, 1, 1) \
- /* ES5 */ \
- F(OwnKeys, 1, 1) \
- \
- /* Message objects */ \
- F(MessageGetStartPosition, 1, 1) \
- F(MessageGetScript, 1, 1) \
- \
- /* Pseudo functions - handled as macros by parser */ \
- F(IS_VAR, 1, 1) \
- \
- /* expose boolean functions from objects-inl.h */ \
- F(HasFastSmiElements, 1, 1) \
- F(HasFastSmiOrObjectElements, 1, 1) \
- F(HasFastObjectElements, 1, 1) \
- F(HasFastDoubleElements, 1, 1) \
- F(HasFastHoleyElements, 1, 1) \
- F(HasDictionaryElements, 1, 1) \
- F(HasSloppyArgumentsElements, 1, 1) \
- F(HasExternalUint8ClampedElements, 1, 1) \
- F(HasExternalArrayElements, 1, 1) \
- F(HasExternalInt8Elements, 1, 1) \
- F(HasExternalUint8Elements, 1, 1) \
- F(HasExternalInt16Elements, 1, 1) \
- F(HasExternalUint16Elements, 1, 1) \
- F(HasExternalInt32Elements, 1, 1) \
- F(HasExternalUint32Elements, 1, 1) \
- F(HasExternalFloat32Elements, 1, 1) \
- F(HasExternalFloat64Elements, 1, 1) \
- F(HasFixedUint8ClampedElements, 1, 1) \
- F(HasFixedInt8Elements, 1, 1) \
- F(HasFixedUint8Elements, 1, 1) \
- F(HasFixedInt16Elements, 1, 1) \
- F(HasFixedUint16Elements, 1, 1) \
- F(HasFixedInt32Elements, 1, 1) \
- F(HasFixedUint32Elements, 1, 1) \
- F(HasFixedFloat32Elements, 1, 1) \
- F(HasFixedFloat64Elements, 1, 1) \
- F(HasFastProperties, 1, 1) \
- F(TransitionElementsKind, 2, 1) \
- F(HaveSameMap, 2, 1) \
- F(IsJSGlobalProxy, 1, 1) \
- F(ForInInit, 2, 2) /* TODO(turbofan): Only temporary */ \
- F(ForInNext, 4, 2) /* TODO(turbofan): Only temporary */ \
- F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */
-
-
-#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
- /* String and Regexp */ \
- F(NumberToStringRT, 1, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(RegExpExecRT, 4, 1) \
- F(StringAdd, 2, 1) \
- F(SubString, 3, 1) \
- F(InternalizeString, 1, 1) \
- F(StringCompare, 2, 1) \
- F(StringCharCodeAtRT, 2, 1) \
- F(GetFromCache, 2, 1) \
- \
- /* Compilation */ \
- F(CompileUnoptimized, 1, 1) \
- F(CompileOptimized, 2, 1) \
- F(TryInstallOptimizedCode, 1, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(NotifyStubFailure, 0, 1) \
- \
- /* Utilities */ \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(AllocateHeapNumber, 0, 1) \
- F(NumberToSmi, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
- \
- F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
- F(NewSloppyArguments, 3, 1) \
- F(NewStrictArguments, 3, 1) \
- \
- /* Harmony generators */ \
- F(CreateJSGeneratorObject, 0, 1) \
- F(SuspendJSGeneratorObject, 1, 1) \
- F(ResumeJSGeneratorObject, 3, 1) \
- F(ThrowGeneratorStateError, 1, 1) \
- \
- /* Arrays */ \
- F(ArrayConstructor, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
- \
- /* Literals */ \
- F(MaterializeRegExpLiteral, 4, 1) \
- F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateArrayLiteralStubBailout, 3, 1) \
- \
- /* Statements */ \
- F(NewClosure, 3, 1) \
- F(NewClosureFromStubFailure, 1, 1) \
- F(NewObject, 1, 1) \
- F(NewObjectWithAllocationSite, 2, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
- F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- \
- /* Contexts */ \
- F(NewGlobalContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeleteLookupSlot, 2, 1) \
- F(LoadLookupSlot, 2, 2) \
- F(LoadLookupSlotNoReferenceError, 2, 2) \
- F(StoreLookupSlot, 4, 1) \
- \
- /* Declarations and initialization */ \
- F(DeclareGlobals, 3, 1) \
- F(DeclareModules, 1, 1) \
- F(DeclareLookupSlot, 4, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(InitializeLegacyConstLookupSlot, 3, 1) \
- \
- /* Eval */ \
- F(ResolvePossiblyDirectEval, 5, 2) \
- \
- /* Maths */ \
- F(MathPowSlow, 2, 1) \
- F(MathPowRT, 2, 1)
-
-
-#define RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
- /* Debugger support*/ \
- F(DebugBreak, 0, 1) \
- F(SetDebugEventListener, 2, 1) \
- F(Break, 0, 1) \
- F(DebugGetPropertyDetails, 2, 1) \
- F(DebugGetProperty, 2, 1) \
- F(DebugPropertyTypeFromDetails, 1, 1) \
- F(DebugPropertyAttributesFromDetails, 1, 1) \
- F(DebugPropertyIndexFromDetails, 1, 1) \
- F(DebugNamedInterceptorPropertyValue, 2, 1) \
- F(DebugIndexedInterceptorElementValue, 2, 1) \
- F(CheckExecutionState, 1, 1) \
- F(GetFrameCount, 1, 1) \
- F(GetFrameDetails, 2, 1) \
- F(GetScopeCount, 2, 1) \
- F(GetStepInPositions, 2, 1) \
- F(GetScopeDetails, 4, 1) \
- F(GetAllScopesDetails, 4, 1) \
- F(GetFunctionScopeCount, 1, 1) \
- F(GetFunctionScopeDetails, 2, 1) \
- F(SetScopeVariableValue, 6, 1) \
- F(DebugPrintScopes, 0, 1) \
- F(GetThreadCount, 1, 1) \
- F(GetThreadDetails, 2, 1) \
- F(SetDisableBreak, 1, 1) \
- F(GetBreakLocations, 2, 1) \
- F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 4, 1) \
- F(ClearBreakPoint, 1, 1) \
- F(ChangeBreakOnException, 2, 1) \
- F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 4, 1) \
- F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 6, 1) \
- F(DebugEvaluateGlobal, 4, 1) \
- F(DebugGetLoadedScripts, 0, 1) \
- F(DebugReferencedBy, 3, 1) \
- F(DebugConstructedBy, 2, 1) \
- F(DebugGetPrototype, 1, 1) \
- F(DebugSetScriptSource, 2, 1) \
- F(DebugCallbackSupportsStepping, 1, 1) \
- F(SystemBreak, 0, 1) \
- F(DebugDisassembleFunction, 1, 1) \
- F(DebugDisassembleConstructor, 1, 1) \
- F(FunctionGetInferredName, 1, 1) \
- F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
- F(LiveEditGatherCompileInfo, 2, 1) \
- F(LiveEditReplaceScript, 3, 1) \
- F(LiveEditReplaceFunctionCode, 2, 1) \
- F(LiveEditFunctionSourceUpdated, 1, 1) \
- F(LiveEditFunctionSetScript, 2, 1) \
- F(LiveEditReplaceRefToNestedFunction, 3, 1) \
- F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditCheckAndDropActivations, 2, 1) \
- F(LiveEditCompareStrings, 2, 1) \
- F(LiveEditRestartFrame, 2, 1) \
- F(GetFunctionCodePositionFromSource, 2, 1) \
- F(ExecuteInDebugContext, 2, 1) \
- \
- F(SetFlags, 1, 1) \
- F(CollectGarbage, 1, 1) \
- F(GetHeapUsage, 0, 1) \
-
-
-#ifdef V8_I18N_SUPPORT
-#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \
- /* i18n support */ \
- /* Standalone, helper methods. */ \
- F(CanonicalizeLanguageTag, 1, 1) \
- F(AvailableLocalesOf, 1, 1) \
- F(GetDefaultICULocale, 0, 1) \
- F(GetLanguageTagVariants, 1, 1) \
- F(IsInitializedIntlObject, 1, 1) \
- F(IsInitializedIntlObjectOfType, 2, 1) \
- F(MarkAsInitializedIntlObjectOfType, 3, 1) \
- F(GetImplFromInitializedIntlObject, 1, 1) \
- \
- /* Date format and parse. */ \
- F(CreateDateTimeFormat, 3, 1) \
- F(InternalDateFormat, 2, 1) \
- F(InternalDateParse, 2, 1) \
- \
- /* Number format and parse. */ \
- F(CreateNumberFormat, 3, 1) \
- F(InternalNumberFormat, 2, 1) \
- F(InternalNumberParse, 2, 1) \
- \
- /* Collator. */ \
- F(CreateCollator, 3, 1) \
- F(InternalCompare, 3, 1) \
- \
- /* String.prototype.normalize. */ \
- F(StringNormalize, 2, 1) \
- \
- /* Break iterator. */ \
- F(CreateBreakIterator, 3, 1) \
- F(BreakIteratorAdoptText, 2, 1) \
- F(BreakIteratorFirst, 1, 1) \
- F(BreakIteratorNext, 1, 1) \
- F(BreakIteratorCurrent, 1, 1) \
- F(BreakIteratorBreakType, 1, 1) \
-
-#else
-#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
-#endif
-
-
-#ifdef DEBUG
-#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
- /* Testing */ \
- F(ListNatives, 0, 1)
-#else
-#define RUNTIME_FUNCTION_LIST_DEBUG(F)
-#endif
-
-// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
-// either directly by id (via the code generator), or indirectly
-// via a native call by name (from within JS code).
-// Entries have the form F(name, number of arguments, number of return values).
-
-#define RUNTIME_FUNCTION_LIST(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
- RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
- RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
-
-// ----------------------------------------------------------------------------
-// INLINE_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_FUNCTION_LIST(F) \
- F(IsSmi, 1, 1) \
- F(IsNonNegativeSmi, 1, 1) \
- F(IsArray, 1, 1) \
- F(IsRegExp, 1, 1) \
- F(IsConstructCall, 0, 1) \
- F(CallFunction, -1 /* receiver + n args + function */, 1) \
- F(ArgumentsLength, 0, 1) \
- F(Arguments, 1, 1) \
- F(ValueOf, 1, 1) \
- F(SetValueOf, 2, 1) \
- F(DateField, 2 /* date object, field index */, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(OneByteSeqStringSetChar, 3, 1) \
- F(TwoByteSeqStringSetChar, 3, 1) \
- F(ObjectEquals, 2, 1) \
- F(IsObject, 1, 1) \
- F(IsFunction, 1, 1) \
- F(IsUndetectableObject, 1, 1) \
- F(IsSpecObject, 1, 1) \
- F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
- F(MathPow, 2, 1) \
- F(IsMinusZero, 1, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
- F(FastAsciiArrayJoin, 2, 1) \
- F(GeneratorNext, 2, 1) \
- F(GeneratorThrow, 2, 1) \
- F(DebugBreakInOptimizedCode, 0, 1) \
- F(ClassOf, 1, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(StringAdd, 2, 1) \
- F(SubString, 3, 1) \
- F(StringCompare, 2, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(GetFromCache, 2, 1) \
- F(NumberToString, 1, 1) \
- F(DebugIsActive, 0, 1)
-
-
-// ----------------------------------------------------------------------------
-// INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called from non-optimized code.
-// For the benefit of (fuzz) tests, the runtime version can also be called
-// directly as %name (i.e. without the leading underscore).
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \
- /* Typed Arrays */ \
- F(TypedArrayInitialize, 5, 1) \
- F(DataViewInitialize, 4, 1) \
- F(MaxSmi, 0, 1) \
- F(TypedArrayMaxSizeInHeap, 0, 1) \
- F(ArrayBufferViewGetByteLength, 1, 1) \
- F(ArrayBufferViewGetByteOffset, 1, 1) \
- F(TypedArrayGetLength, 1, 1) \
- /* ArrayBuffer */ \
- F(ArrayBufferGetByteLength, 1, 1) \
- /* Maths */ \
- F(ConstructDouble, 2, 1) \
- F(DoubleHi, 1, 1) \
- F(DoubleLo, 1, 1) \
- F(MathSqrtRT, 1, 1) \
- F(MathLogRT, 1, 1)
-
-
-//---------------------------------------------------------------------------
-// Runtime provides access to all C++ runtime functions.
-
-class RuntimeState {
- public:
- StaticResource<ConsStringIteratorOp>* string_iterator() {
- return &string_iterator_;
- }
- unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
- return &to_upper_mapping_;
- }
- unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
- return &to_lower_mapping_;
- }
- ConsStringIteratorOp* string_iterator_compare_x() {
- return &string_iterator_compare_x_;
- }
- ConsStringIteratorOp* string_iterator_compare_y() {
- return &string_iterator_compare_y_;
- }
- ConsStringIteratorOp* string_locale_compare_it1() {
- return &string_locale_compare_it1_;
- }
- ConsStringIteratorOp* string_locale_compare_it2() {
- return &string_locale_compare_it2_;
- }
-
- private:
- RuntimeState() {}
- // Non-reentrant string buffer for efficient general use in the runtime.
- StaticResource<ConsStringIteratorOp> string_iterator_;
- unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
- unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
- ConsStringIteratorOp string_iterator_compare_x_;
- ConsStringIteratorOp string_iterator_compare_y_;
- ConsStringIteratorOp string_locale_compare_it1_;
- ConsStringIteratorOp string_locale_compare_it2_;
-
- friend class Isolate;
- friend class Runtime;
-
- DISALLOW_COPY_AND_ASSIGN(RuntimeState);
-};
-
-
-class Runtime : public AllStatic {
- public:
- enum FunctionId {
-#define F(name, nargs, ressize) k##name,
- RUNTIME_FUNCTION_LIST(F)
- INLINE_OPTIMIZED_FUNCTION_LIST(F)
-#undef F
-#define F(name, nargs, ressize) kInline##name,
- INLINE_FUNCTION_LIST(F)
-#undef F
-#define F(name, nargs, ressize) kInlineOptimized##name,
- INLINE_OPTIMIZED_FUNCTION_LIST(F)
-#undef F
- kNumFunctions,
- kFirstInlineFunction = kInlineIsSmi
- };
-
- enum IntrinsicType {
- RUNTIME,
- INLINE,
- INLINE_OPTIMIZED
- };
-
- // Intrinsic function descriptor.
- struct Function {
- FunctionId function_id;
- IntrinsicType intrinsic_type;
- // The JS name of the function.
- const char* name;
-
- // The C++ (native) entry point. NULL if the function is inlined.
- byte* entry;
-
- // The number of arguments expected. nargs is -1 if the function takes
- // a variable number of arguments.
- int nargs;
- // Size of result. Most functions return a single pointer, size 1.
- int result_size;
- };
-
- static const int kNotFound = -1;
-
- // Add internalized strings for all the intrinsic function names to a
- // StringDictionary.
- static void InitializeIntrinsicFunctionNames(Isolate* isolate,
- Handle<NameDictionary> dict);
-
- // Get the intrinsic function with the given name, which must be internalized.
- static const Function* FunctionForName(Handle<String> name);
-
- // Get the intrinsic function with the given FunctionId.
- static const Function* FunctionForId(FunctionId id);
-
- // Get the intrinsic function with the given function entry address.
- static const Function* FunctionForEntry(Address ref);
-
- // General-purpose helper functions for runtime system.
- static int StringMatch(Isolate* isolate,
- Handle<String> sub,
- Handle<String> pat,
- int index);
-
- static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
-
- // TODO(1240886): Some of the following methods are *not* handle safe, but
- // accept handle arguments. This seems fragile.
-
- // Support getting the characters in a string using [] notation as
- // in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeHandle<Object> GetElementOrCharAt(
- Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
- Isolate* isolate, Handle<Object> object, Handle<Object> key,
- Handle<Object> value, StrictMode strict_mode);
-
- MUST_USE_RESULT static MaybeHandle<Object> DefineObjectProperty(
- Handle<JSObject> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- JSReceiver::StoreFromKeyed store_from_keyed =
- JSReceiver::MAY_BE_STORE_FROM_KEYED);
-
- MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
- Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key,
- JSReceiver::DeleteMode mode);
-
- MUST_USE_RESULT static MaybeHandle<Object> HasObjectProperty(
- Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key);
-
- MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key);
-
- static void SetupArrayBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- bool is_external,
- void* data,
- size_t allocated_length);
-
- static bool SetupArrayBufferAllocatingData(
- Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- size_t allocated_length,
- bool initialize = true);
-
- static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
-
- static void FreeArrayBuffer(
- Isolate* isolate,
- JSArrayBuffer* phantom_array_buffer);
-
- enum TypedArrayId {
- // arrayIds below should be synchromized with typedarray.js natives.
- ARRAY_ID_UINT8 = 1,
- ARRAY_ID_INT8 = 2,
- ARRAY_ID_UINT16 = 3,
- ARRAY_ID_INT16 = 4,
- ARRAY_ID_UINT32 = 5,
- ARRAY_ID_INT32 = 6,
- ARRAY_ID_FLOAT32 = 7,
- ARRAY_ID_FLOAT64 = 8,
- ARRAY_ID_UINT8_CLAMPED = 9,
-
- ARRAY_ID_FIRST = ARRAY_ID_UINT8,
- ARRAY_ID_LAST = ARRAY_ID_UINT8_CLAMPED
- };
-
- static void ArrayIdToTypeAndSize(int array_id,
- ExternalArrayType *type,
- ElementsKind* external_elements_kind,
- ElementsKind* fixed_elements_kind,
- size_t *element_size);
-
- // Used in runtime.cc and hydrogen's VisitArrayLiteral.
- MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> elements);
-};
-
-
-//---------------------------------------------------------------------------
-// Constants used by interface to runtime functions.
-
-class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
-class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
-
-class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
-class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
-class DeclareGlobalsStrictMode: public BitField<StrictMode, 2, 1> {};
-
-} } // namespace v8::internal
-
-#endif // V8_RUNTIME_H_
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index d9e1fe5c99..4d15d205a4 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -563,6 +563,14 @@ function ToInteger(x) {
}
+// ES6, draft 08-24-14, section 7.1.15
+function ToLength(arg) {
+ arg = ToInteger(arg);
+ if (arg < 0) return 0;
+ return arg < $Number.MAX_SAFE_INTEGER ? arg : $Number.MAX_SAFE_INTEGER;
+}
+
+
// ECMA-262, section 9.6, page 34.
function ToUint32(x) {
if (%_IsSmi(x) && x >= 0) return x;
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
new file mode 100644
index 0000000000..d0d6aa64fb
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -0,0 +1,347 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_SetInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
+ holder->set_table(*table);
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetAdd) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ table = OrderedHashSet::Add(table, key);
+ holder->set_table(*table);
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetHas) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ return isolate->heap()->ToBoolean(table->Contains(key));
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetDelete) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ bool was_present = false;
+ table = OrderedHashSet::Remove(table, key, &was_present);
+ holder->set_table(*table);
+ return isolate->heap()->ToBoolean(was_present);
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetClear) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ table = OrderedHashSet::Clear(table);
+ holder->set_table(*table);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetGetSize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
+ return Smi::FromInt(table->NumberOfElements());
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
+ CONVERT_SMI_ARG_CHECKED(kind, 2)
+ RUNTIME_ASSERT(kind == JSSetIterator::kKindValues ||
+ kind == JSSetIterator::kKindEntries);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+ holder->set_table(*table);
+ holder->set_index(Smi::FromInt(0));
+ holder->set_kind(Smi::FromInt(kind));
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSSetIterator, holder, 0);
+ CONVERT_ARG_CHECKED(JSArray, value_array, 1);
+ return holder->Next(value_array);
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
+ holder->set_table(*table);
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapGet) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(key), isolate);
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapHas) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(key), isolate);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapDelete) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ bool was_present = false;
+ Handle<OrderedHashMap> new_table =
+ OrderedHashMap::Remove(table, key, &was_present);
+ holder->set_table(*new_table);
+ return isolate->heap()->ToBoolean(was_present);
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapClear) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ table = OrderedHashMap::Clear(table);
+ holder->set_table(*table);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapSet) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ Handle<OrderedHashMap> new_table = OrderedHashMap::Put(table, key, value);
+ holder->set_table(*new_table);
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapGetSize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
+ return Smi::FromInt(table->NumberOfElements());
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
+ CONVERT_SMI_ARG_CHECKED(kind, 2)
+ RUNTIME_ASSERT(kind == JSMapIterator::kKindKeys ||
+ kind == JSMapIterator::kKindValues ||
+ kind == JSMapIterator::kKindEntries);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+ holder->set_table(*table);
+ holder->set_index(Smi::FromInt(0));
+ holder->set_kind(Smi::FromInt(kind));
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<FixedArray> entries =
+ isolate->factory()->NewFixedArray(table->NumberOfElements() * 2);
+ {
+ DisallowHeapAllocation no_gc;
+ int number_of_non_hole_elements = 0;
+ for (int i = 0; i < table->Capacity(); i++) {
+ Handle<Object> key(table->KeyAt(i), isolate);
+ if (table->IsKey(*key)) {
+ entries->set(number_of_non_hole_elements++, *key);
+ Object* value = table->Lookup(key);
+ entries->set(number_of_non_hole_elements++, value);
+ }
+ }
+ DCHECK_EQ(table->NumberOfElements() * 2, number_of_non_hole_elements);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSMapIterator, holder, 0);
+ CONVERT_ARG_CHECKED(JSArray, value_array, 1);
+ return holder->Next(value_array);
+}
+
+
+static Handle<JSWeakCollection> WeakCollectionInitialize(
+ Isolate* isolate, Handle<JSWeakCollection> weak_collection) {
+ DCHECK(weak_collection->map()->inobject_properties() == 0);
+ Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
+ weak_collection->set_table(*table);
+ return weak_collection;
+}
+
+
+RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ return *WeakCollectionInitialize(isolate, weak_collection);
+}
+
+
+RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ RUNTIME_ASSERT(table->IsKey(*key));
+ Handle<Object> lookup(table->Lookup(key), isolate);
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+}
+
+
+RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ RUNTIME_ASSERT(table->IsKey(*key));
+ Handle<Object> lookup(table->Lookup(key), isolate);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ RUNTIME_ASSERT(table->IsKey(*key));
+ bool was_present = false;
+ Handle<ObjectHashTable> new_table =
+ ObjectHashTable::Remove(table, key, &was_present);
+ weak_collection->set_table(*new_table);
+ return isolate->heap()->ToBoolean(was_present);
+}
+
+
+RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ RUNTIME_ASSERT(table->IsKey(*key));
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
+ weak_collection->set_table(*new_table);
+ return *weak_collection;
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<FixedArray> values =
+ isolate->factory()->NewFixedArray(table->NumberOfElements());
+ {
+ DisallowHeapAllocation no_gc;
+ int number_of_non_hole_elements = 0;
+ for (int i = 0; i < table->Capacity(); i++) {
+ Handle<Object> key(table->KeyAt(i), isolate);
+ if (table->IsKey(*key)) {
+ values->set(number_of_non_hole_elements++, *key);
+ }
+ }
+ DCHECK_EQ(table->NumberOfElements(), number_of_non_hole_elements);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+
+RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ // TODO(adamk): Currently this runtime function is only called three times per
+ // isolate. If it's called more often, the map should be moved into the
+ // strong root list.
+ Handle<Map> map =
+ isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+ Handle<JSWeakMap> weakmap =
+ Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
+ return *WeakCollectionInitialize(isolate, weakmap);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
new file mode 100644
index 0000000000..3f7e936d3b
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -0,0 +1,441 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/compiler.h"
+#include "src/deoptimizer.h"
+#include "src/frames.h"
+#include "src/full-codegen.h"
+#include "src/isolate.h"
+#include "src/isolate-inl.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/v8threads.h"
+#include "src/vm-state.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_CompileLazy) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+#ifdef DEBUG
+ if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
+ PrintF("[unoptimized: ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+#endif
+
+ // Compile the target function.
+ DCHECK(function->shared()->allows_lazy_compilation());
+
+ Handle<Code> code;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
+ Compiler::GetLazyCode(function));
+ DCHECK(code->kind() == Code::FUNCTION ||
+ code->kind() == Code::OPTIMIZED_FUNCTION);
+ function->ReplaceCode(*code);
+ return *code;
+}
+
+
+RUNTIME_FUNCTION(Runtime_CompileOptimized) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+
+ Handle<Code> unoptimized(function->shared()->code());
+ if (!isolate->use_crankshaft() ||
+ function->shared()->optimization_disabled() ||
+ isolate->DebuggerHasBreakPoints()) {
+ // If the function is not optimizable or debugger is active continue
+ // using the code from the full compiler.
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
+ function->shared()->optimization_disabled() ? "F" : "T",
+ isolate->DebuggerHasBreakPoints() ? "T" : "F");
+ }
+ function->ReplaceCode(*unoptimized);
+ return function->code();
+ }
+
+ Compiler::ConcurrencyMode mode =
+ concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
+ Handle<Code> code;
+ if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
+ function->ReplaceCode(*code);
+ } else {
+ function->ReplaceCode(function->shared()->code());
+ }
+
+ DCHECK(function->code()->kind() == Code::FUNCTION ||
+ function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+ function->IsInOptimizationQueue());
+ return function->code();
+}
+
+
+RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ delete deoptimizer;
+ return isolate->heap()->undefined_value();
+}
+
+
+class ActivationsFinder : public ThreadVisitor {
+ public:
+ Code* code_;
+ bool has_code_activations_;
+
+ explicit ActivationsFinder(Code* code)
+ : code_(code), has_code_activations_(false) {}
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ JavaScriptFrameIterator it(isolate, top);
+ VisitFrames(&it);
+ }
+
+ void VisitFrames(JavaScriptFrameIterator* it) {
+ for (; !it->done(); it->Advance()) {
+ JavaScriptFrame* frame = it->frame();
+ if (code_->contains(frame->pc())) has_code_activations_ = true;
+ }
+ }
+};
+
+
+RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(type_arg, 0);
+ Deoptimizer::BailoutType type =
+ static_cast<Deoptimizer::BailoutType>(type_arg);
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ DCHECK(AllowHeapAllocation::IsAllowed());
+
+ Handle<JSFunction> function = deoptimizer->function();
+ Handle<Code> optimized_code = deoptimizer->compiled_code();
+
+ DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(type == deoptimizer->bailout_type());
+
+ // Make sure to materialize objects before causing any allocation.
+ JavaScriptFrameIterator it(isolate);
+ deoptimizer->MaterializeHeapObjects(&it);
+ delete deoptimizer;
+
+ JavaScriptFrame* frame = it.frame();
+ RUNTIME_ASSERT(frame->function()->IsJSFunction());
+ DCHECK(frame->function() == *function);
+
+ // Avoid doing too much work when running with --always-opt and keep
+ // the optimized code around.
+ if (FLAG_always_opt || type == Deoptimizer::LAZY) {
+ return isolate->heap()->undefined_value();
+ }
+
+ // Search for other activations of the same function and code.
+ ActivationsFinder activations_finder(*optimized_code);
+ activations_finder.VisitFrames(&it);
+ isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
+
+ if (!activations_finder.has_code_activations_) {
+ if (function->code() == *optimized_code) {
+ if (FLAG_trace_deopt) {
+ PrintF("[removing optimized code for: ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+ function->ReplaceCode(function->shared()->code());
+ // Evict optimized code for this function from the cache so that it
+ // doesn't get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
+ }
+ } else {
+ // TODO(titzer): we should probably do DeoptimizeCodeList(code)
+ // unconditionally if the code is not already marked for deoptimization.
+ // If there is an index by shared function info, all the better.
+ Deoptimizer::DeoptimizeFunction(*function);
+ }
+
+ return isolate->heap()->undefined_value();
+}
+
+
+static bool IsSuitableForOnStackReplacement(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Code> current_code) {
+ // Keep track of whether we've succeeded in optimizing.
+ if (!isolate->use_crankshaft() || !current_code->optimizable()) return false;
+ // If we are trying to do OSR when there are already optimized
+ // activations of the function, it means (a) the function is directly or
+ // indirectly recursive and (b) an optimized invocation has been
+ // deoptimized so that we are currently in an unoptimized activation.
+ // Check for optimized activations of this function.
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized() && frame->function() == *function) return false;
+ }
+
+ return true;
+}
+
+
+RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<Code> caller_code(function->shared()->code());
+
+ // We're not prepared to handle a function with arguments object.
+ DCHECK(!function->shared()->uses_arguments());
+
+ RUNTIME_ASSERT(FLAG_use_osr);
+
+ // Passing the PC in the javascript frame from the caller directly is
+ // not GC safe, so we walk the stack to get it.
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ if (!caller_code->contains(frame->pc())) {
+ // Code on the stack may not be the code object referenced by the shared
+ // function info. It may have been replaced to include deoptimization data.
+ caller_code = Handle<Code>(frame->LookupCode());
+ }
+
+ uint32_t pc_offset =
+ static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
+
+#ifdef DEBUG
+ DCHECK_EQ(frame->function(), *function);
+ DCHECK_EQ(frame->LookupCode(), *caller_code);
+ DCHECK(caller_code->contains(frame->pc()));
+#endif // DEBUG
+
+
+ BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
+ DCHECK(!ast_id.IsNone());
+
+ Compiler::ConcurrencyMode mode =
+ isolate->concurrent_osr_enabled() &&
+ (function->shared()->ast_node_count() > 512)
+ ? Compiler::CONCURRENT
+ : Compiler::NOT_CONCURRENT;
+ Handle<Code> result = Handle<Code>::null();
+
+ OptimizedCompileJob* job = NULL;
+ if (mode == Compiler::CONCURRENT) {
+ // Gate the OSR entry with a stack check.
+ BackEdgeTable::AddStackCheck(caller_code, pc_offset);
+ // Poll already queued compilation jobs.
+ OptimizingCompilerThread* thread = isolate->optimizing_compiler_thread();
+ if (thread->IsQueuedForOSR(function, ast_id)) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Still waiting for queued: ");
+ function->PrintName();
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
+ }
+ return NULL;
+ }
+
+ job = thread->FindReadyOSRCandidate(function, ast_id);
+ }
+
+ if (job != NULL) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Found ready: ");
+ function->PrintName();
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
+ }
+ result = Compiler::GetConcurrentlyOptimizedCode(job);
+ } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Compiling: ");
+ function->PrintName();
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
+ }
+ MaybeHandle<Code> maybe_result =
+ Compiler::GetOptimizedCode(function, caller_code, mode, ast_id);
+ if (maybe_result.ToHandle(&result) &&
+ result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
+ // Optimization is queued. Return to check later.
+ return NULL;
+ }
+ }
+
+ // Revert the patched back edge table, regardless of whether OSR succeeds.
+ BackEdgeTable::Revert(isolate, *caller_code);
+
+ // Check whether we ended up with usable optimized code.
+ if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(result->deoptimization_data());
+
+ if (data->OsrPcOffset()->value() >= 0) {
+ DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id);
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
+ ast_id.ToInt(), data->OsrPcOffset()->value());
+ }
+ // TODO(titzer): this is a massive hack to make the deopt counts
+ // match. Fix heuristics for reenabling optimizations!
+ function->shared()->increment_deopt_count();
+
+ // TODO(titzer): Do not install code into the function.
+ function->ReplaceCode(*result);
+ return *result;
+ }
+ }
+
+ // Failed.
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Failed: ");
+ function->PrintName();
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
+ }
+
+ if (!function->IsOptimized()) {
+ function->ReplaceCode(function->shared()->code());
+ }
+ return NULL;
+}
+
+
+RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // First check if this is a real stack overflow.
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ SealHandleScope shs(isolate);
+ return isolate->StackOverflow();
+ }
+
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ return (function->IsOptimized()) ? function->code()
+ : function->shared()->code();
+}
+
+
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context) {
+ DCHECK(context->allow_code_gen_from_strings()->IsFalse());
+ // Check with callback if set.
+ AllowCodeGenerationFromStringsCallback callback =
+ isolate->allow_code_gen_callback();
+ if (callback == NULL) {
+ // No callback set and code generation disallowed.
+ return false;
+ } else {
+ // Callback set. Let it decide if code generation is allowed.
+ VMState<EXTERNAL> state(isolate);
+ return callback(v8::Utils::ToLocal(context));
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_CompileString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
+
+ // Extract native context.
+ Handle<Context> context(isolate->native_context());
+
+ // Check if native context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, context)) {
+ Handle<Object> error_message =
+ context->ErrorMessageForCodeGenerationFromStrings();
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewEvalError("code_gen_from_strings",
+ HandleVector<Object>(&error_message, 1)));
+ }
+
+ // Compile source string in the native context.
+ ParseRestriction restriction = function_literal_only
+ ? ONLY_SINGLE_FUNCTION_LITERAL
+ : NO_PARSE_RESTRICTION;
+ Handle<JSFunction> fun;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, fun,
+ Compiler::GetFunctionFromEval(source, context, SLOPPY, restriction,
+ RelocInfo::kNoPosition));
+ return *fun;
+}
+
+
+static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
+ Handle<Object> receiver,
+ StrictMode strict_mode,
+ int scope_position) {
+ Handle<Context> context = Handle<Context>(isolate->context());
+ Handle<Context> native_context = Handle<Context>(context->native_context());
+
+ // Check if native context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ Handle<Object> error_message =
+ native_context->ErrorMessageForCodeGenerationFromStrings();
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error = isolate->factory()->NewEvalError(
+ "code_gen_from_strings", HandleVector<Object>(&error_message, 1));
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+ return MakePair(isolate->heap()->exception(), NULL);
+ }
+
+ // Deal with a normal eval call with a string argument. Compile it
+ // and return the compiled function bound in the local context.
+ static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
+ Handle<JSFunction> compiled;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, compiled,
+ Compiler::GetFunctionFromEval(source, context, strict_mode, restriction,
+ scope_position),
+ MakePair(isolate->heap()->exception(), NULL));
+ return MakePair(*compiled, *receiver);
+}
+
+
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+
+ Handle<Object> callee = args.at<Object>(0);
+
+ // If "eval" didn't refer to the original GlobalEval, it's not a
+ // direct call to eval.
+ // (And even if it is, but the first argument isn't a string, just let
+ // execution default to an indirect call to eval, which will also return
+ // the first argument without doing anything).
+ if (*callee != isolate->native_context()->global_eval_fun() ||
+ !args[1]->IsString()) {
+ return MakePair(*callee, isolate->heap()->undefined_value());
+ }
+
+ DCHECK(args[3]->IsSmi());
+ DCHECK(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
+ DCHECK(args[4]->IsSmi());
+ return CompileGlobalEval(isolate, args.at<String>(1), args.at<Object>(2),
+ strict_mode, args.smi_at(4));
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
new file mode 100644
index 0000000000..5822374c7f
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -0,0 +1,751 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifdef V8_I18N_SUPPORT
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/i18n.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+
+#include "unicode/brkiter.h"
+#include "unicode/calendar.h"
+#include "unicode/coll.h"
+#include "unicode/curramt.h"
+#include "unicode/datefmt.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/timezone.h"
+#include "unicode/uchar.h"
+#include "unicode/ucol.h"
+#include "unicode/ucurr.h"
+#include "unicode/uloc.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+
+
+namespace v8 {
+namespace internal {
+
+// %CanonicalizeLanguageTag(localeId): canonicalizes a BCP47 language tag by
+// round-tripping it through ICU (uloc_forLanguageTag, then uloc_toLanguageTag
+// with strict rules). Returns the canonical tag string, or "invalid-tag" if
+// ICU rejects the input at either step.
+RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
+
+  v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
+
+  // Return value which denotes invalid language tag.
+  const char* const kInvalidTag = "invalid-tag";
+
+  UErrorCode error = U_ZERO_ERROR;
+  char icu_result[ULOC_FULLNAME_CAPACITY];
+  int icu_length = 0;
+
+  // BCP47 tag -> ICU locale id (e.g. de-DE-u-co-phonebk -> de_DE@...).
+  uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
+                      &icu_length, &error);
+  if (U_FAILURE(error) || icu_length == 0) {
+    return *factory->NewStringFromAsciiChecked(kInvalidTag);
+  }
+
+  char result[ULOC_FULLNAME_CAPACITY];
+
+  // Force strict BCP47 rules.
+  uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
+
+  if (U_FAILURE(error)) {
+    return *factory->NewStringFromAsciiChecked(kInvalidTag);
+  }
+
+  return *factory->NewStringFromAsciiChecked(result);
+}
+
+
+// %AvailableLocalesOf(service): returns a fresh JS object whose property
+// names are the BCP47 tags of all ICU locales available for the requested
+// service ("collator", "numberformat", "dateformat" or "breakiterator") and
+// whose values are the locales' indices in ICU's list. An unrecognized
+// service name yields an empty object (count stays 0).
+RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
+
+  const icu::Locale* available_locales = NULL;
+  int32_t count = 0;
+
+  if (service->IsUtf8EqualTo(CStrVector("collator"))) {
+    available_locales = icu::Collator::getAvailableLocales(count);
+  } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
+    available_locales = icu::NumberFormat::getAvailableLocales(count);
+  } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
+    available_locales = icu::DateFormat::getAvailableLocales(count);
+  } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
+    available_locales = icu::BreakIterator::getAvailableLocales(count);
+  }
+
+  UErrorCode error = U_ZERO_ERROR;
+  char result[ULOC_FULLNAME_CAPACITY];
+  Handle<JSObject> locales = factory->NewJSObject(isolate->object_function());
+
+  for (int32_t i = 0; i < count; ++i) {
+    const char* icu_name = available_locales[i].getName();
+
+    error = U_ZERO_ERROR;
+    // No need to force strict BCP47 rules.
+    uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+    if (U_FAILURE(error)) {
+      // This shouldn't happen, but lets not break the user.
+      continue;
+    }
+
+    RETURN_FAILURE_ON_EXCEPTION(
+        isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                     locales, factory->NewStringFromAsciiChecked(result),
+                     factory->NewNumber(i), NONE));
+  }
+
+  return *locales;
+}
+
+
+// %GetDefaultICULocale(): returns ICU's default locale as a BCP47 language
+// tag, or the string "und" (undetermined) if the conversion fails.
+RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+
+  DCHECK(args.length() == 0);
+
+  // Default-constructed icu::Locale is the process default locale.
+  icu::Locale default_locale;
+
+  // Set the locale
+  char result[ULOC_FULLNAME_CAPACITY];
+  UErrorCode status = U_ZERO_ERROR;
+  uloc_toLanguageTag(default_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
+                     FALSE, &status);
+  if (U_SUCCESS(status)) {
+    return *factory->NewStringFromAsciiChecked(result);
+  }
+
+  return *factory->NewStringFromStaticChars("und");
+}
+
+
+// %GetLanguageTagVariants(localeArray): for each locale-id string in the
+// input JSArray (length capped below 100), returns an object with two
+// properties: "maximized" — the tag with likely subtags added (via ICU
+// uloc_addLikelySubtags) and extensions stripped — and "base" — the original
+// tag with extensions stripped. Throws an illegal-argument error if any
+// element is not a string or cannot be parsed by ICU. Result is a JSArray of
+// the same length.
+RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
+
+  uint32_t length = static_cast<uint32_t>(input->length()->Number());
+  // Set some limit to prevent fuzz tests from going OOM.
+  // Can be bumped when callers' requirements change.
+  RUNTIME_ASSERT(length < 100);
+  Handle<FixedArray> output = factory->NewFixedArray(length);
+  Handle<Name> maximized = factory->NewStringFromStaticChars("maximized");
+  Handle<Name> base = factory->NewStringFromStaticChars("base");
+  for (unsigned int i = 0; i < length; ++i) {
+    Handle<Object> locale_id;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, locale_id,
+                                       Object::GetElement(isolate, input, i));
+    if (!locale_id->IsString()) {
+      return isolate->Throw(*factory->illegal_argument_string());
+    }
+
+    v8::String::Utf8Value utf8_locale_id(
+        v8::Utils::ToLocal(Handle<String>::cast(locale_id)));
+
+    UErrorCode error = U_ZERO_ERROR;
+
+    // Convert from BCP47 to ICU format.
+    // de-DE-u-co-phonebk -> de_DE@collation=phonebook
+    char icu_locale[ULOC_FULLNAME_CAPACITY];
+    int icu_locale_length = 0;
+    uloc_forLanguageTag(*utf8_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
+                        &icu_locale_length, &error);
+    if (U_FAILURE(error) || icu_locale_length == 0) {
+      return isolate->Throw(*factory->illegal_argument_string());
+    }
+
+    // Maximize the locale.
+    // de_DE@collation=phonebook -> de_Latn_DE@collation=phonebook
+    char icu_max_locale[ULOC_FULLNAME_CAPACITY];
+    uloc_addLikelySubtags(icu_locale, icu_max_locale, ULOC_FULLNAME_CAPACITY,
+                          &error);
+
+    // Remove extensions from maximized locale.
+    // de_Latn_DE@collation=phonebook -> de_Latn_DE
+    char icu_base_max_locale[ULOC_FULLNAME_CAPACITY];
+    uloc_getBaseName(icu_max_locale, icu_base_max_locale,
+                     ULOC_FULLNAME_CAPACITY, &error);
+
+    // Get original name without extensions.
+    // de_DE@collation=phonebook -> de_DE
+    char icu_base_locale[ULOC_FULLNAME_CAPACITY];
+    uloc_getBaseName(icu_locale, icu_base_locale, ULOC_FULLNAME_CAPACITY,
+                     &error);
+
+    // Convert from ICU locale format to BCP47 format.
+    // de_Latn_DE -> de-Latn-DE
+    char base_max_locale[ULOC_FULLNAME_CAPACITY];
+    uloc_toLanguageTag(icu_base_max_locale, base_max_locale,
+                       ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+    // de_DE -> de-DE
+    char base_locale[ULOC_FULLNAME_CAPACITY];
+    uloc_toLanguageTag(icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY,
+                       FALSE, &error);
+
+    // A single error check covers the whole uloc_* pipeline above; ICU
+    // calls are no-ops once `error` holds a failure code.
+    if (U_FAILURE(error)) {
+      return isolate->Throw(*factory->illegal_argument_string());
+    }
+
+    Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+    Handle<String> value = factory->NewStringFromAsciiChecked(base_max_locale);
+    JSObject::AddProperty(result, maximized, value, NONE);
+    value = factory->NewStringFromAsciiChecked(base_locale);
+    JSObject::AddProperty(result, base, value, NONE);
+    output->set(i, *result);
+  }
+
+  Handle<JSArray> result = factory->NewJSArrayWithElements(output);
+  result->set_length(Smi::FromInt(length));
+  return *result;
+}
+
+
+// %IsInitializedIntlObject(input): true iff input is a JSObject carrying the
+// hidden "initialized Intl object" marker property (set by
+// %MarkAsInitializedIntlObjectOfType below).
+RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+
+  if (!input->IsJSObject()) return isolate->heap()->false_value();
+  Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+  Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
+  Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
+  // GetHiddenProperty returns the hole when the property is absent.
+  return isolate->heap()->ToBoolean(!tag->IsTheHole());
+}
+
+
+// %IsInitializedIntlObjectOfType(input, expectedType): true iff input is a
+// JSObject whose hidden initialization marker is a string equal to
+// expectedType (e.g. the type tag stored by
+// %MarkAsInitializedIntlObjectOfType).
+RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
+
+  if (!input->IsJSObject()) return isolate->heap()->false_value();
+  Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+  Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
+  Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
+  return isolate->heap()->ToBoolean(tag->IsString() &&
+                                    String::cast(*tag)->Equals(*expected_type));
+}
+
+
+// %MarkAsInitializedIntlObjectOfType(input, type, impl): tags `input` with
+// two hidden properties — the initialization marker set to the type-name
+// string, and the implementation-object reference. Returns undefined.
+RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, impl, 2);
+
+  Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
+  JSObject::SetHiddenProperty(input, marker, type);
+
+  marker = isolate->factory()->intl_impl_object_string();
+  JSObject::SetHiddenProperty(input, marker, impl);
+
+  return isolate->heap()->undefined_value();
+}
+
+
+// %GetImplFromInitializedIntlObject(input): returns the hidden
+// implementation object stored by %MarkAsInitializedIntlObjectOfType.
+// Throws a "not_intl_object" TypeError if input is not a JSObject or lacks
+// the hidden impl property.
+RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+
+  if (!input->IsJSObject()) {
+    Vector<Handle<Object> > arguments = HandleVector(&input, 1);
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                   NewTypeError("not_intl_object", arguments));
+  }
+
+  Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+  Handle<String> marker = isolate->factory()->intl_impl_object_string();
+  Handle<Object> impl(obj->GetHiddenProperty(marker), isolate);
+  if (impl->IsTheHole()) {
+    Vector<Handle<Object> > arguments = HandleVector(&obj, 1);
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                   NewTypeError("not_intl_object", arguments));
+  }
+  return *impl;
+}
+
+
+// %CreateDateTimeFormat(locale, options, resolved): creates a wrapper
+// JSObject holding an icu::SimpleDateFormat. The raw ICU pointer is stashed
+// in internal field 0 (reinterpret_cast to Smi*), and a weak global handle
+// is registered so DateFormat::DeleteDateFormat frees it when the wrapper
+// is garbage-collected. Throws illegal operation if ICU initialization
+// fails.
+RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+  Handle<ObjectTemplateInfo> date_format_template = I18N::GetTemplate(isolate);
+
+  // Create an empty object wrapper.
+  Handle<JSObject> local_object;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, local_object,
+      Execution::InstantiateObject(date_format_template));
+
+  // Set date time formatter as internal field of the resulting JS object.
+  icu::SimpleDateFormat* date_format =
+      DateFormat::InitializeDateTimeFormat(isolate, locale, options, resolved);
+
+  if (!date_format) return isolate->ThrowIllegalOperation();
+
+  local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
+
+  // Marker property used elsewhere to recognize a valid wrapper.
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("dateFormat");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
+
+  // Make object handle weak so we can delete the data format once GC kicks in.
+  Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+  GlobalHandles::MakeWeak(wrapper.location(),
+                          reinterpret_cast<void*>(wrapper.location()),
+                          DateFormat::DeleteDateFormat);
+  return *local_object;
+}
+
+
+// %InternalDateFormat(formatHolder, date): formats a JSDate using the
+// icu::SimpleDateFormat unpacked from the holder's internal field. The date
+// is first coerced to a number (milliseconds since epoch), and the UTF-16
+// ICU output is returned as a two-byte V8 string.
+RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
+
+  Handle<Object> value;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                     Execution::ToNumber(isolate, date));
+
+  icu::SimpleDateFormat* date_format =
+      DateFormat::UnpackDateFormat(isolate, date_format_holder);
+  if (!date_format) return isolate->ThrowIllegalOperation();
+
+  icu::UnicodeString result;
+  date_format->format(value->Number(), result);
+
+  Handle<String> result_str;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result_str,
+      isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+          reinterpret_cast<const uint16_t*>(result.getBuffer()),
+          result.length())));
+  return *result_str;
+}
+
+
+// %InternalDateParse(formatHolder, dateString): parses a date string with
+// the holder's icu::SimpleDateFormat and returns a new JSDate, or undefined
+// if ICU fails to parse the input.
+RUNTIME_FUNCTION(Runtime_InternalDateParse) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
+
+  v8::String::Utf8Value utf8_date(v8::Utils::ToLocal(date_string));
+  icu::UnicodeString u_date(icu::UnicodeString::fromUTF8(*utf8_date));
+  icu::SimpleDateFormat* date_format =
+      DateFormat::UnpackDateFormat(isolate, date_format_holder);
+  if (!date_format) return isolate->ThrowIllegalOperation();
+
+  UErrorCode status = U_ZERO_ERROR;
+  UDate date = date_format->parse(u_date, status);
+  if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, Execution::NewDate(isolate, static_cast<double>(date)));
+  DCHECK(result->IsJSDate());
+  return *result;
+}
+
+
+// %CreateNumberFormat(locale, options, resolved): creates a wrapper JSObject
+// holding an icu::DecimalFormat in internal field 0, mirroring
+// Runtime_CreateDateTimeFormat above. A weak global handle routes cleanup to
+// NumberFormat::DeleteNumberFormat on GC.
+RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+  Handle<ObjectTemplateInfo> number_format_template =
+      I18N::GetTemplate(isolate);
+
+  // Create an empty object wrapper.
+  Handle<JSObject> local_object;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, local_object,
+      Execution::InstantiateObject(number_format_template));
+
+  // Set number formatter as internal field of the resulting JS object.
+  icu::DecimalFormat* number_format =
+      NumberFormat::InitializeNumberFormat(isolate, locale, options, resolved);
+
+  if (!number_format) return isolate->ThrowIllegalOperation();
+
+  local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
+
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("numberFormat");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
+
+  Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+  GlobalHandles::MakeWeak(wrapper.location(),
+                          reinterpret_cast<void*>(wrapper.location()),
+                          NumberFormat::DeleteNumberFormat);
+  return *local_object;
+}
+
+
+// %InternalNumberFormat(formatHolder, number): coerces `number` to a double
+// and formats it with the holder's icu::DecimalFormat, returning the result
+// as a two-byte V8 string.
+RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
+
+  Handle<Object> value;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                     Execution::ToNumber(isolate, number));
+
+  icu::DecimalFormat* number_format =
+      NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+  if (!number_format) return isolate->ThrowIllegalOperation();
+
+  icu::UnicodeString result;
+  number_format->format(value->Number(), result);
+
+  Handle<String> result_str;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result_str,
+      isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+          reinterpret_cast<const uint16_t*>(result.getBuffer()),
+          result.length())));
+  return *result_str;
+}
+
+
+// %InternalNumberParse(formatHolder, numberString): parses a numeric string
+// with the holder's icu::DecimalFormat. Returns a Number for double/long/
+// int64 parse results, or undefined on parse failure or any other
+// Formattable type.
+RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
+
+  v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
+  icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
+  icu::DecimalFormat* number_format =
+      NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+  if (!number_format) return isolate->ThrowIllegalOperation();
+
+  UErrorCode status = U_ZERO_ERROR;
+  icu::Formattable result;
+  // ICU 4.6 doesn't support parseCurrency call. We need to wait for ICU49
+  // to be part of Chrome.
+  // TODO(cira): Include currency parsing code using parseCurrency call.
+  // We need to check if the formatter parses all currencies or only the
+  // one it was constructed with (it will impact the API - how to return ISO
+  // code and the value).
+  number_format->parse(u_number, result, status);
+  if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+  switch (result.getType()) {
+    case icu::Formattable::kDouble:
+      return *isolate->factory()->NewNumber(result.getDouble());
+    case icu::Formattable::kLong:
+      return *isolate->factory()->NewNumberFromInt(result.getLong());
+    case icu::Formattable::kInt64:
+      return *isolate->factory()->NewNumber(
+          static_cast<double>(result.getInt64()));
+    default:
+      return isolate->heap()->undefined_value();
+  }
+}
+
+
+// %CreateCollator(locale, options, resolved): creates a wrapper JSObject
+// holding an icu::Collator in internal field 0, mirroring the other
+// Create* runtime functions in this file. Cleanup on GC is delegated to
+// Collator::DeleteCollator via a weak global handle.
+RUNTIME_FUNCTION(Runtime_CreateCollator) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+  Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
+
+  // Create an empty object wrapper.
+  Handle<JSObject> local_object;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, local_object, Execution::InstantiateObject(collator_template));
+
+  // Set collator as internal field of the resulting JS object.
+  icu::Collator* collator =
+      Collator::InitializeCollator(isolate, locale, options, resolved);
+
+  if (!collator) return isolate->ThrowIllegalOperation();
+
+  local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
+
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("collator");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
+
+  Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+  GlobalHandles::MakeWeak(wrapper.location(),
+                          reinterpret_cast<void*>(wrapper.location()),
+                          Collator::DeleteCollator);
+  return *local_object;
+}
+
+
+// %InternalCompare(collatorHolder, string1, string2): compares two strings
+// with the holder's icu::Collator over their UTF-16 contents and returns the
+// UCollationResult as a Number (negative/zero/positive ordering value).
+RUNTIME_FUNCTION(Runtime_InternalCompare) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
+
+  icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
+  if (!collator) return isolate->ThrowIllegalOperation();
+
+  // v8::String::Value exposes the string as UTF-16, matching ICU's UChar.
+  v8::String::Value string_value1(v8::Utils::ToLocal(string1));
+  v8::String::Value string_value2(v8::Utils::ToLocal(string2));
+  const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
+  const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
+  UErrorCode status = U_ZERO_ERROR;
+  UCollationResult result =
+      collator->compare(u_string1, string_value1.length(), u_string2,
+                        string_value2.length(), status);
+  if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
+
+  return *isolate->factory()->NewNumberFromInt(result);
+}
+
+
+// %StringNormalize(string, formId): Unicode-normalizes a string via
+// icu::Normalizer. formId indexes normalizationForms (0=NFC, 1=NFD, 2=NFKC,
+// 3=NFKD). Returns the normalized two-byte string, or undefined if ICU
+// reports an error.
+RUNTIME_FUNCTION(Runtime_StringNormalize) {
+  HandleScope scope(isolate);
+  static const UNormalizationMode normalizationForms[] = {
+      UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD};
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
+  CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
+  RUNTIME_ASSERT(form_id >= 0 &&
+                 static_cast<size_t>(form_id) < arraysize(normalizationForms));
+
+  v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
+  const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
+
+  // TODO(mnita): check Normalizer2 (not available in ICU 46)
+  UErrorCode status = U_ZERO_ERROR;
+  icu::UnicodeString result;
+  icu::Normalizer::normalize(u_value, normalizationForms[form_id], 0, result,
+                             status);
+  if (U_FAILURE(status)) {
+    return isolate->heap()->undefined_value();
+  }
+
+  Handle<String> result_str;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result_str,
+      isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+          reinterpret_cast<const uint16_t*>(result.getBuffer()),
+          result.length())));
+  return *result_str;
+}
+
+
+// %CreateBreakIterator(locale, options, resolved): creates a wrapper
+// JSObject holding an icu::BreakIterator in internal field 0. Internal
+// field 1 holds the adopted icu::UnicodeString text (NULL until
+// %BreakIteratorAdoptText is called). Uses GetTemplate2 — a two-field
+// template, unlike the single-field wrappers above. GC cleanup goes through
+// BreakIterator::DeleteBreakIterator via a weak global handle.
+RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+  Handle<ObjectTemplateInfo> break_iterator_template =
+      I18N::GetTemplate2(isolate);
+
+  // Create an empty object wrapper.
+  Handle<JSObject> local_object;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, local_object,
+      Execution::InstantiateObject(break_iterator_template));
+
+  // Set break iterator as internal field of the resulting JS object.
+  icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
+      isolate, locale, options, resolved);
+
+  if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+  local_object->SetInternalField(0, reinterpret_cast<Smi*>(break_iterator));
+  // Make sure that the pointer to adopted text is NULL.
+  local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
+
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("breakIterator");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
+
+  // Make object handle weak so we can delete the break iterator once GC kicks
+  // in.
+  Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+  GlobalHandles::MakeWeak(wrapper.location(),
+                          reinterpret_cast<void*>(wrapper.location()),
+                          BreakIterator::DeleteBreakIterator);
+  return *local_object;
+}
+
+
+// %BreakIteratorAdoptText(iteratorHolder, text): copies `text` into a new
+// icu::UnicodeString owned by the holder (internal field 1), deleting any
+// previously adopted text, and points the break iterator at it. Returns
+// undefined.
+RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
+
+  icu::BreakIterator* break_iterator =
+      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+  // Free the previously adopted text, if any (field starts out NULL;
+  // delete on NULL is a no-op).
+  icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
+      break_iterator_holder->GetInternalField(1));
+  delete u_text;
+
+  v8::String::Value text_value(v8::Utils::ToLocal(text));
+  u_text = new icu::UnicodeString(reinterpret_cast<const UChar*>(*text_value),
+                                  text_value.length());
+  break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
+
+  break_iterator->setText(*u_text);
+
+  return isolate->heap()->undefined_value();
+}
+
+
+// %BreakIteratorFirst(iteratorHolder): resets the break iterator to the
+// first boundary and returns its position as a Number.
+RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+  icu::BreakIterator* break_iterator =
+      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+  return *isolate->factory()->NewNumberFromInt(break_iterator->first());
+}
+
+
+// %BreakIteratorNext(iteratorHolder): advances the break iterator to the
+// next boundary and returns its position as a Number.
+RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+  icu::BreakIterator* break_iterator =
+      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+  return *isolate->factory()->NewNumberFromInt(break_iterator->next());
+}
+
+
+// %BreakIteratorCurrent(iteratorHolder): returns the break iterator's
+// current boundary position as a Number without moving it.
+RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+  icu::BreakIterator* break_iterator =
+      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+  return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+}
+
+
+// %BreakIteratorBreakType(iteratorHolder): classifies the rule status of the
+// current break as one of "none", "number", "letter", "kana", "ideo" or
+// "unknown", by bucketing getRuleStatus() against the UBRK_WORD_* ranges.
+RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
+  HandleScope scope(isolate);
+
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+  icu::BreakIterator* break_iterator =
+      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+  // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+  icu::RuleBasedBreakIterator* rule_based_iterator =
+      static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+  int32_t status = rule_based_iterator->getRuleStatus();
+  // Keep return values in sync with JavaScript BreakType enum.
+  if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("none");
+  } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+    return *isolate->factory()->number_string();
+  } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("letter");
+  } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("kana");
+  } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("ideo");
+  } else {
+    return *isolate->factory()->NewStringFromStaticChars("unknown");
+  }
+}
+}
+} // namespace v8::internal
+
+#endif // V8_I18N_SUPPORT
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
new file mode 100644
index 0000000000..7a89c519a0
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-json.cc
@@ -0,0 +1,54 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/json-parser.h"
+#include "src/json-stringifier.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// %QuoteJSONString(string): returns the JSON-quoted form of a single string
+// via BasicJsonStringifier::StringifyString (quotes plus escapes).
+RUNTIME_FUNCTION(Runtime_QuoteJSONString) {
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+  DCHECK(args.length() == 1);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, BasicJsonStringifier::StringifyString(isolate, string));
+  return *result;
+}
+
+
+// %BasicJSONStringify(object): the fast path behind JSON.stringify for the
+// no-replacer, no-indent case; delegates to BasicJsonStringifier.
+RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  BasicJsonStringifier stringifier(isolate);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     stringifier.Stringify(object));
+  return *result;
+}
+
+
+// %ParseJson(source): parses a JSON string. The source is flattened first,
+// then dispatched to the one-byte or two-byte JsonParser specialization
+// depending on its representation.
+RUNTIME_FUNCTION(Runtime_ParseJson) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+
+  source = String::Flatten(source);
+  // Optimized fast case where we only have Latin1 characters.
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     source->IsSeqOneByteString()
+                                         ? JsonParser<true>::Parse(source)
+                                         : JsonParser<false>::Parse(source));
+  return *result;
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
new file mode 100644
index 0000000000..16acb390f2
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -0,0 +1,247 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+#include "third_party/fdlibm/fdlibm.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Expands to a runtime function Runtime_Math<Name> that bumps the
+// math_<name> counter and applies std::<name> to its single double argument,
+// returning a fresh HeapNumber. Instantiated for acos, asin, atan and log.
+#define RUNTIME_UNARY_MATH(Name, name)                         \
+  RUNTIME_FUNCTION(Runtime_Math##Name) {                       \
+    HandleScope scope(isolate);                                \
+    DCHECK(args.length() == 1);                                \
+    isolate->counters()->math_##name()->Increment();           \
+    CONVERT_DOUBLE_ARG_CHECKED(x, 0);                          \
+    return *isolate->factory()->NewHeapNumber(std::name(x));   \
+  }
+
+RUNTIME_UNARY_MATH(Acos, acos)
+RUNTIME_UNARY_MATH(Asin, asin)
+RUNTIME_UNARY_MATH(Atan, atan)
+RUNTIME_UNARY_MATH(LogRT, log)
+#undef RUNTIME_UNARY_MATH
+
+
+// %_DoubleHi(x): returns the high 32 bits of the IEEE-754 bit pattern of x
+// (sign, exponent, top of mantissa), reinterpreted as a signed int32.
+RUNTIME_FUNCTION(Runtime_DoubleHi) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  uint64_t integer = double_to_uint64(x);
+  integer = (integer >> 32) & 0xFFFFFFFFu;
+  return *isolate->factory()->NewNumber(static_cast<int32_t>(integer));
+}
+
+
+// %_DoubleLo(x): returns the low 32 bits of the IEEE-754 bit pattern of x,
+// reinterpreted as a signed int32. Counterpart of Runtime_DoubleHi.
+RUNTIME_FUNCTION(Runtime_DoubleLo) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  return *isolate->factory()->NewNumber(
+      static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
+}
+
+
+// %_ConstructDouble(hi, lo): rebuilds a double from its high and low 32-bit
+// halves — the inverse of Runtime_DoubleHi/Runtime_DoubleLo.
+RUNTIME_FUNCTION(Runtime_ConstructDouble) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
+  CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+  uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
+  return *isolate->factory()->NewNumber(uint64_to_double(result));
+}
+
+
+// %_RemPiO2(x): argument reduction modulo pi/2 using fdlibm::rempio2.
+// Returns a JSArray [n, y0, y1] where n is the octant count and y0+y1 is the
+// double-double remainder, for use by trig implementations.
+RUNTIME_FUNCTION(Runtime_RemPiO2) {
+  HandleScope handle_scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  Factory* factory = isolate->factory();
+  double y[2];
+  int n = fdlibm::rempio2(x, y);
+  Handle<FixedArray> array = factory->NewFixedArray(3);
+  Handle<HeapNumber> y0 = factory->NewHeapNumber(y[0]);
+  Handle<HeapNumber> y1 = factory->NewHeapNumber(y[1]);
+  array->set(0, Smi::FromInt(n));
+  array->set(1, *y0);
+  array->set(2, *y1);
+  return *factory->NewJSArrayWithElements(array);
+}
+
+
+// pi/4, used to synthesize atan2 results for (+/-inf, +/-inf) inputs.
+static const double kPiDividedBy4 = 0.78539816339744830962;
+
+
+// %_MathAtan2(x, y): computes atan2. The doubly-infinite case is handled
+// explicitly so the result is an exact multiple of pi/4 (sign from x,
+// multiplier 1 or 3 from the sign of y); everything else defers to
+// std::atan2.
+RUNTIME_FUNCTION(Runtime_MathAtan2) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  isolate->counters()->math_atan2()->Increment();
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+  double result;
+  if (std::isinf(x) && std::isinf(y)) {
+    // Make sure that the result in case of two infinite arguments
+    // is a multiple of Pi / 4. The sign of the result is determined
+    // by the first argument (x) and the sign of the second argument
+    // determines the multiplier: one or three.
+    int multiplier = (x < 0) ? -1 : 1;
+    if (y < 0) multiplier *= 3;
+    result = multiplier * kPiDividedBy4;
+  } else {
+    result = std::atan2(x, y);
+  }
+  return *isolate->factory()->NewNumber(result);
+}
+
+
+// %_MathExpRT(x): exp(x) via V8's table-based fast_exp, lazily initializing
+// the lookup tables on first use.
+RUNTIME_FUNCTION(Runtime_MathExpRT) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  isolate->counters()->math_exp()->Increment();
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  lazily_initialize_fast_exp();
+  return *isolate->factory()->NewNumber(fast_exp(x));
+}
+
+
+// %_MathFloorRT(x): floor of a double via V8's Floor helper.
+RUNTIME_FUNCTION(Runtime_MathFloorRT) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  isolate->counters()->math_floor()->Increment();
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  return *isolate->factory()->NewNumber(Floor(x));
+}
+
+
+// Slow version of Math.pow. We check for fast paths for special cases.
+// Used if VFP3 is not available.
+// %_MathPowSlow(x, y): general Math.pow fallback. Smi exponents take the
+// faster power_double_int path; otherwise power_helper is used and NaN is
+// returned through the canonical nan_value.
+RUNTIME_FUNCTION(Runtime_MathPowSlow) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  isolate->counters()->math_pow()->Increment();
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+
+  // If the second argument is a smi, it is much faster to call the
+  // custom powi() function than the generic pow().
+  if (args[1]->IsSmi()) {
+    int y = args.smi_at(1);
+    return *isolate->factory()->NewNumber(power_double_int(x, y));
+  }
+
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+  double result = power_helper(x, y);
+  if (std::isnan(result)) return isolate->heap()->nan_value();
+  return *isolate->factory()->NewNumber(result);
+}
+
+
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5. Used as slow case from full codegen.
+// %_MathPowRT(x, y): pow fast path for the caller-guaranteed case that y is
+// neither an integer nor +/-0.5. y == 0 still short-circuits to 1; NaN
+// results are canonicalized to the shared nan_value.
+RUNTIME_FUNCTION(Runtime_MathPowRT) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  isolate->counters()->math_pow()->Increment();
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+  if (y == 0) {
+    return Smi::FromInt(1);
+  } else {
+    double result = power_double_double(x, y);
+    if (std::isnan(result)) return isolate->heap()->nan_value();
+    return *isolate->factory()->NewNumber(result);
+  }
+}
+
+
+// %RoundNumber(input): Math.round semantics. Smis round to themselves.
+// HeapNumbers are classified by IEEE exponent/sign to handle -0, the
+// Smi-representable fast path, very large magnitudes (no fractional part),
+// and the (-0.5, -0] negative-zero result, before falling back to
+// Floor(value + 0.5).
+RUNTIME_FUNCTION(Runtime_RoundNumber) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
+  isolate->counters()->math_round()->Increment();
+
+  if (!input->IsHeapNumber()) {
+    DCHECK(input->IsSmi());
+    return *input;
+  }
+
+  Handle<HeapNumber> number = Handle<HeapNumber>::cast(input);
+
+  double value = number->value();
+  int exponent = number->get_exponent();
+  int sign = number->get_sign();
+
+  if (exponent < -1) {
+    // Number in range ]-0.5..0.5[. These always round to +/-zero.
+    if (sign) return isolate->heap()->minus_zero_value();
+    return Smi::FromInt(0);
+  }
+
+  // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
+  // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
+  // argument holds for 32-bit smis).
+  if (!sign && exponent < kSmiValueSize - 2) {
+    return Smi::FromInt(static_cast<int>(value + 0.5));
+  }
+
+  // If the magnitude is big enough, there's no place for fraction part. If we
+  // try to add 0.5 to this number, 1.0 will be added instead.
+  if (exponent >= 52) {
+    return *number;
+  }
+
+  if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
+
+  // Do not call NumberFromDouble() to avoid extra checks.
+  return *isolate->factory()->NewNumber(Floor(value + 0.5));
+}
+
+
+// %_MathSqrtRT(x): square root via V8's fast_sqrt helper.
+RUNTIME_FUNCTION(Runtime_MathSqrtRT) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  isolate->counters()->math_sqrt()->Increment();
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  return *isolate->factory()->NewNumber(fast_sqrt(x));
+}
+
+
+// %MathFround(x): Math.fround — rounds a double to the nearest float32
+// value (via DoubleToFloat32) and returns it widened back to a Number.
+RUNTIME_FUNCTION(Runtime_MathFround) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+  float xf = DoubleToFloat32(x);
+  return *isolate->factory()->NewNumber(xf);
+}
+
+
+// %_MathPow intrinsic reference implementation: forwards directly to the
+// Runtime_MathPowSlow implementation function.
+RUNTIME_FUNCTION(RuntimeReference_MathPow) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_MathPowSlow(args, isolate);
+}
+
+
+// %_IsMinusZero(obj): true iff obj is a HeapNumber holding -0.0. Any
+// non-HeapNumber (including Smi 0) yields false.
+RUNTIME_FUNCTION(RuntimeReference_IsMinusZero) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsHeapNumber()) return isolate->heap()->false_value();
+  HeapNumber* number = HeapNumber::cast(obj);
+  return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
new file mode 100644
index 0000000000..3286aa6f13
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -0,0 +1,565 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/codegen.h"
+#include "src/misc-intrinsics.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+
+
+#ifndef _STLP_VENDOR_CSTD
+// STLPort doesn't import fpclassify and isless into the std namespace.
+using std::fpclassify;
+using std::isless;
+#endif
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_NumberToRadixString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(radix, 1);
+ RUNTIME_ASSERT(2 <= radix && radix <= 36);
+
+ // Fast case where the result is a one character string.
+ if (args[0]->IsSmi()) {
+ int value = args.smi_at(0);
+ if (value >= 0 && value < radix) {
+ // Character array used for conversion.
+ static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(
+ kCharTable[value]);
+ }
+ }
+
+ // Slow case.
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ if (std::isnan(value)) {
+ return isolate->heap()->nan_string();
+ }
+ if (std::isinf(value)) {
+ if (value < 0) {
+ return isolate->heap()->minus_infinity_string();
+ }
+ return isolate->heap()->infinity_string();
+ }
+ char* str = DoubleToRadixCString(value, radix);
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToFixed) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
+ int f = FastD2IChecked(f_number);
+ // See DoubleToFixedCString for these constants:
+ RUNTIME_ASSERT(f >= 0 && f <= 20);
+ RUNTIME_ASSERT(!Double(value).IsSpecial());
+ char* str = DoubleToFixedCString(value, f);
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToExponential) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
+ int f = FastD2IChecked(f_number);
+ RUNTIME_ASSERT(f >= -1 && f <= 20);
+ RUNTIME_ASSERT(!Double(value).IsSpecial());
+ char* str = DoubleToExponentialCString(value, f);
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToPrecision) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
+ int f = FastD2IChecked(f_number);
+ RUNTIME_ASSERT(f >= 1 && f <= 21);
+ RUNTIME_ASSERT(!Double(value).IsSpecial());
+ char* str = DoubleToPrecisionCString(value, f);
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_IsValidSmi) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
+ return isolate->heap()->ToBoolean(Smi::IsValid(number));
+}
+
+
+static bool AreDigits(const uint8_t* s, int from, int to) {
+ for (int i = from; i < to; i++) {
+ if (s[i] < '0' || s[i] > '9') return false;
+ }
+
+ return true;
+}
+
+
+static int ParseDecimalInteger(const uint8_t* s, int from, int to) {
+ DCHECK(to - from < 10); // Overflow is not possible.
+ DCHECK(from < to);
+ int d = s[from] - '0';
+
+ for (int i = from + 1; i < to; i++) {
+ d = 10 * d + (s[i] - '0');
+ }
+
+ return d;
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringToNumber) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ subject = String::Flatten(subject);
+
+ // Fast case: short integer or some sorts of junk values.
+ if (subject->IsSeqOneByteString()) {
+ int len = subject->length();
+ if (len == 0) return Smi::FromInt(0);
+
+ DisallowHeapAllocation no_gc;
+ uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
+ bool minus = (data[0] == '-');
+ int start_pos = (minus ? 1 : 0);
+
+ if (start_pos == len) {
+ return isolate->heap()->nan_value();
+ } else if (data[start_pos] > '9') {
+ // Fast check for a junk value. A valid string may start from a
+ // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
+ // or the 'I' character ('Infinity'). All of that have codes not greater
+ // than '9' except 'I' and &nbsp;.
+ if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
+ return isolate->heap()->nan_value();
+ }
+ } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
+ // The maximal/minimal smi has 10 digits. If the string has less digits
+ // we know it will fit into the smi-data type.
+ int d = ParseDecimalInteger(data, start_pos, len);
+ if (minus) {
+ if (d == 0) return isolate->heap()->minus_zero_value();
+ d = -d;
+ } else if (!subject->HasHashCode() && len <= String::kMaxArrayIndexSize &&
+ (len == 1 || data[0] != '0')) {
+ // String hash is not calculated yet but all the data are present.
+      // Update the hash field to speed up sequential conversions.
+ uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
+#ifdef DEBUG
+ subject->Hash(); // Force hash calculation.
+ DCHECK_EQ(static_cast<int>(subject->hash_field()),
+ static_cast<int>(hash));
+#endif
+ subject->set_hash_field(hash);
+ }
+ return Smi::FromInt(d);
+ }
+ }
+
+ // Slower case.
+ int flags = ALLOW_HEX;
+ if (FLAG_harmony_numeric_literals) {
+ // The current spec draft has not updated "ToNumber Applied to the String
+ // Type", https://bugs.ecmascript.org/show_bug.cgi?id=1584
+ flags |= ALLOW_OCTAL | ALLOW_BINARY;
+ }
+
+ return *isolate->factory()->NewNumber(
+ StringToDouble(isolate->unicode_cache(), *subject, flags));
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringParseInt) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
+ RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
+
+ subject = String::Flatten(subject);
+ double value;
+
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = subject->GetFlatContent();
+
+ // ECMA-262 section 15.1.2.3, empty string is NaN
+ if (flat.IsOneByte()) {
+ value =
+ StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(), radix);
+ } else {
+ value = StringToInt(isolate->unicode_cache(), flat.ToUC16Vector(), radix);
+ }
+ }
+
+ return *isolate->factory()->NewNumber(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringParseFloat) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+
+ subject = String::Flatten(subject);
+ double value = StringToDouble(isolate->unicode_cache(), *subject,
+ ALLOW_TRAILING_JUNK, base::OS::nan_value());
+
+ return *isolate->factory()->NewNumber(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToStringRT) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
+
+ return *isolate->factory()->NumberToString(number);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
+
+ return *isolate->factory()->NumberToString(number, false);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToInteger) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
+ return *isolate->factory()->NewNumber(DoubleToInteger(number));
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
+ double double_value = DoubleToInteger(number);
+ // Map both -0 and +0 to +0.
+ if (double_value == 0) double_value = 0;
+
+ return *isolate->factory()->NewNumber(double_value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToJSUint32) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
+ return *isolate->factory()->NewNumberFromUint(number);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberToJSInt32) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
+ return *isolate->factory()->NewNumberFromInt(DoubleToInt32(number));
+}
+
+
+// Converts a Number to a Smi, if possible. Returns NaN if the number is not
+// a small integer.
+RUNTIME_FUNCTION(Runtime_NumberToSmi) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ if (obj->IsSmi()) {
+ return obj;
+ }
+ if (obj->IsHeapNumber()) {
+ double value = HeapNumber::cast(obj)->value();
+ int int_value = FastD2I(value);
+ if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+ return Smi::FromInt(int_value);
+ }
+ }
+ return isolate->heap()->nan_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberAdd) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return *isolate->factory()->NewNumber(x + y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberSub) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return *isolate->factory()->NewNumber(x - y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberMul) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return *isolate->factory()->NewNumber(x * y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberUnaryMinus) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return *isolate->factory()->NewNumber(-x);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberDiv) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return *isolate->factory()->NewNumber(x / y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberMod) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return *isolate->factory()->NewNumber(modulo(x, y));
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberImul) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ // We rely on implementation-defined behavior below, but at least not on
+ // undefined behavior.
+ CONVERT_NUMBER_CHECKED(uint32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(uint32_t, y, Int32, args[1]);
+ int32_t product = static_cast<int32_t>(x * y);
+ return *isolate->factory()->NewNumberFromInt(product);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberOr) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return *isolate->factory()->NewNumberFromInt(x | y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberAnd) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return *isolate->factory()->NewNumberFromInt(x & y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberXor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return *isolate->factory()->NewNumberFromInt(x ^ y);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberShl) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return *isolate->factory()->NewNumberFromInt(x << (y & 0x1f));
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberShr) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return *isolate->factory()->NewNumberFromUint(x >> (y & 0x1f));
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberSar) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
+ return *isolate->factory()->NewNumberFromInt(
+ ArithmeticShiftRight(x, y & 0x1f));
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberEquals) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ if (std::isnan(x)) return Smi::FromInt(NOT_EQUAL);
+ if (std::isnan(y)) return Smi::FromInt(NOT_EQUAL);
+ if (x == y) return Smi::FromInt(EQUAL);
+ Object* result;
+ if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
+ result = Smi::FromInt(EQUAL);
+ } else {
+ result = Smi::FromInt(NOT_EQUAL);
+ }
+ return result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_NumberCompare) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, uncomparable_result, 2)
+ if (std::isnan(x) || std::isnan(y)) return *uncomparable_result;
+ if (x == y) return Smi::FromInt(EQUAL);
+ if (isless(x, y)) return Smi::FromInt(LESS);
+ return Smi::FromInt(GREATER);
+}
+
+
+// Compare two Smis as if they were converted to strings and then
+// compared lexicographically.
+RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(x_value, 0);
+ CONVERT_SMI_ARG_CHECKED(y_value, 1);
+
+ // If the integers are equal so are the string representations.
+ if (x_value == y_value) return Smi::FromInt(EQUAL);
+
+ // If one of the integers is zero the normal integer order is the
+ // same as the lexicographic order of the string representations.
+ if (x_value == 0 || y_value == 0)
+ return Smi::FromInt(x_value < y_value ? LESS : GREATER);
+
+ // If only one of the integers is negative the negative number is
+ // smallest because the char code of '-' is less than the char code
+ // of any digit. Otherwise, we make both values positive.
+
+ // Use unsigned values otherwise the logic is incorrect for -MIN_INT on
+ // architectures using 32-bit Smis.
+ uint32_t x_scaled = x_value;
+ uint32_t y_scaled = y_value;
+ if (x_value < 0 || y_value < 0) {
+ if (y_value >= 0) return Smi::FromInt(LESS);
+ if (x_value >= 0) return Smi::FromInt(GREATER);
+ x_scaled = -x_value;
+ y_scaled = -y_value;
+ }
+
+ static const uint32_t kPowersOf10[] = {
+ 1, 10, 100, 1000,
+ 10 * 1000, 100 * 1000, 1000 * 1000, 10 * 1000 * 1000,
+ 100 * 1000 * 1000, 1000 * 1000 * 1000};
+
+ // If the integers have the same number of decimal digits they can be
+ // compared directly as the numeric order is the same as the
+ // lexicographic order. If one integer has fewer digits, it is scaled
+ // by some power of 10 to have the same number of digits as the longer
+ // integer. If the scaled integers are equal it means the shorter
+ // integer comes first in the lexicographic order.
+
+ // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+ int x_log2 = IntegerLog2(x_scaled);
+ int x_log10 = ((x_log2 + 1) * 1233) >> 12;
+ x_log10 -= x_scaled < kPowersOf10[x_log10];
+
+ int y_log2 = IntegerLog2(y_scaled);
+ int y_log10 = ((y_log2 + 1) * 1233) >> 12;
+ y_log10 -= y_scaled < kPowersOf10[y_log10];
+
+ int tie = EQUAL;
+
+ if (x_log10 < y_log10) {
+ // X has fewer digits. We would like to simply scale up X but that
+ // might overflow, e.g when comparing 9 with 1_000_000_000, 9 would
+ // be scaled up to 9_000_000_000. So we scale up by the next
+ // smallest power and scale down Y to drop one digit. It is OK to
+ // drop one digit from the longer integer since the final digit is
+ // past the length of the shorter integer.
+ x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
+ y_scaled /= 10;
+ tie = LESS;
+ } else if (y_log10 < x_log10) {
+ y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
+ x_scaled /= 10;
+ tie = GREATER;
+ }
+
+ if (x_scaled < y_scaled) return Smi::FromInt(LESS);
+ if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
+ return Smi::FromInt(tie);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_NumberToString) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_NumberToStringRT(args, isolate);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
new file mode 100644
index 0000000000..e96d50195a
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -0,0 +1,1131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/jsregexp-inl.h"
+#include "src/jsregexp.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/runtime/string-builder.h"
+#include "src/string-search.h"
+
+namespace v8 {
+namespace internal {
+
+class CompiledReplacement {
+ public:
+ explicit CompiledReplacement(Zone* zone)
+ : parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
+
+ // Return whether the replacement is simple.
+ bool Compile(Handle<String> replacement, int capture_count,
+ int subject_length);
+
+ // Use Apply only if Compile returned false.
+ void Apply(ReplacementStringBuilder* builder, int match_from, int match_to,
+ int32_t* match);
+
+ // Number of distinct parts of the replacement pattern.
+ int parts() { return parts_.length(); }
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ enum PartType {
+ SUBJECT_PREFIX = 1,
+ SUBJECT_SUFFIX,
+ SUBJECT_CAPTURE,
+ REPLACEMENT_SUBSTRING,
+ REPLACEMENT_STRING,
+ NUMBER_OF_PART_TYPES
+ };
+
+ struct ReplacementPart {
+ static inline ReplacementPart SubjectMatch() {
+ return ReplacementPart(SUBJECT_CAPTURE, 0);
+ }
+ static inline ReplacementPart SubjectCapture(int capture_index) {
+ return ReplacementPart(SUBJECT_CAPTURE, capture_index);
+ }
+ static inline ReplacementPart SubjectPrefix() {
+ return ReplacementPart(SUBJECT_PREFIX, 0);
+ }
+ static inline ReplacementPart SubjectSuffix(int subject_length) {
+ return ReplacementPart(SUBJECT_SUFFIX, subject_length);
+ }
+ static inline ReplacementPart ReplacementString() {
+ return ReplacementPart(REPLACEMENT_STRING, 0);
+ }
+ static inline ReplacementPart ReplacementSubString(int from, int to) {
+ DCHECK(from >= 0);
+ DCHECK(to > from);
+ return ReplacementPart(-from, to);
+ }
+
+ // If tag <= 0 then it is the negation of a start index of a substring of
+ // the replacement pattern, otherwise it's a value from PartType.
+ ReplacementPart(int tag, int data) : tag(tag), data(data) {
+ // Must be non-positive or a PartType value.
+ DCHECK(tag < NUMBER_OF_PART_TYPES);
+ }
+ // Either a value of PartType or a non-positive number that is
+ // the negation of an index into the replacement string.
+ int tag;
+ // The data value's interpretation depends on the value of tag:
+ // tag == SUBJECT_PREFIX ||
+ // tag == SUBJECT_SUFFIX: data is unused.
+ // tag == SUBJECT_CAPTURE: data is the number of the capture.
+ // tag == REPLACEMENT_SUBSTRING ||
+ // tag == REPLACEMENT_STRING: data is index into array of substrings
+ // of the replacement string.
+ // tag <= 0: Temporary representation of the substring of the replacement
+ // string ranging over -tag .. data.
+ // Is replaced by REPLACEMENT_{SUB,}STRING when we create the
+ // substring objects.
+ int data;
+ };
+
+ template <typename Char>
+ bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+ Vector<Char> characters, int capture_count,
+ int subject_length, Zone* zone) {
+ int length = characters.length();
+ int last = 0;
+ for (int i = 0; i < length; i++) {
+ Char c = characters[i];
+ if (c == '$') {
+ int next_index = i + 1;
+ if (next_index == length) { // No next character!
+ break;
+ }
+ Char c2 = characters[next_index];
+ switch (c2) {
+ case '$':
+ if (i > last) {
+ // There is a substring before. Include the first "$".
+ parts->Add(
+ ReplacementPart::ReplacementSubString(last, next_index),
+ zone);
+ last = next_index + 1; // Continue after the second "$".
+ } else {
+ // Let the next substring start with the second "$".
+ last = next_index;
+ }
+ i = next_index;
+ break;
+ case '`':
+ if (i > last) {
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ }
+ parts->Add(ReplacementPart::SubjectPrefix(), zone);
+ i = next_index;
+ last = i + 1;
+ break;
+ case '\'':
+ if (i > last) {
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ }
+ parts->Add(ReplacementPart::SubjectSuffix(subject_length), zone);
+ i = next_index;
+ last = i + 1;
+ break;
+ case '&':
+ if (i > last) {
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ }
+ parts->Add(ReplacementPart::SubjectMatch(), zone);
+ i = next_index;
+ last = i + 1;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ int capture_ref = c2 - '0';
+ if (capture_ref > capture_count) {
+ i = next_index;
+ continue;
+ }
+ int second_digit_index = next_index + 1;
+ if (second_digit_index < length) {
+ // Peek ahead to see if we have two digits.
+ Char c3 = characters[second_digit_index];
+ if ('0' <= c3 && c3 <= '9') { // Double digits.
+ int double_digit_ref = capture_ref * 10 + c3 - '0';
+ if (double_digit_ref <= capture_count) {
+ next_index = second_digit_index;
+ capture_ref = double_digit_ref;
+ }
+ }
+ }
+ if (capture_ref > 0) {
+ if (i > last) {
+ parts->Add(ReplacementPart::ReplacementSubString(last, i),
+ zone);
+ }
+ DCHECK(capture_ref <= capture_count);
+ parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
+ last = next_index + 1;
+ }
+ i = next_index;
+ break;
+ }
+ default:
+ i = next_index;
+ break;
+ }
+ }
+ }
+ if (length > last) {
+ if (last == 0) {
+ // Replacement is simple. Do not use Apply to do the replacement.
+ return true;
+ } else {
+ parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
+ }
+ }
+ return false;
+ }
+
+ ZoneList<ReplacementPart> parts_;
+ ZoneList<Handle<String> > replacement_substrings_;
+ Zone* zone_;
+};
+
+
+bool CompiledReplacement::Compile(Handle<String> replacement, int capture_count,
+ int subject_length) {
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent content = replacement->GetFlatContent();
+ DCHECK(content.IsFlat());
+ bool simple = false;
+ if (content.IsOneByte()) {
+ simple = ParseReplacementPattern(&parts_, content.ToOneByteVector(),
+ capture_count, subject_length, zone());
+ } else {
+ DCHECK(content.IsTwoByte());
+ simple = ParseReplacementPattern(&parts_, content.ToUC16Vector(),
+ capture_count, subject_length, zone());
+ }
+ if (simple) return true;
+ }
+
+ Isolate* isolate = replacement->GetIsolate();
+ // Find substrings of replacement string and create them as String objects.
+ int substring_index = 0;
+ for (int i = 0, n = parts_.length(); i < n; i++) {
+ int tag = parts_[i].tag;
+ if (tag <= 0) { // A replacement string slice.
+ int from = -tag;
+ int to = parts_[i].data;
+ replacement_substrings_.Add(
+ isolate->factory()->NewSubString(replacement, from, to), zone());
+ parts_[i].tag = REPLACEMENT_SUBSTRING;
+ parts_[i].data = substring_index;
+ substring_index++;
+ } else if (tag == REPLACEMENT_STRING) {
+ replacement_substrings_.Add(replacement, zone());
+ parts_[i].data = substring_index;
+ substring_index++;
+ }
+ }
+ return false;
+}
+
+
+void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
+ int match_from, int match_to, int32_t* match) {
+ DCHECK_LT(0, parts_.length());
+ for (int i = 0, n = parts_.length(); i < n; i++) {
+ ReplacementPart part = parts_[i];
+ switch (part.tag) {
+ case SUBJECT_PREFIX:
+ if (match_from > 0) builder->AddSubjectSlice(0, match_from);
+ break;
+ case SUBJECT_SUFFIX: {
+ int subject_length = part.data;
+ if (match_to < subject_length) {
+ builder->AddSubjectSlice(match_to, subject_length);
+ }
+ break;
+ }
+ case SUBJECT_CAPTURE: {
+ int capture = part.data;
+ int from = match[capture * 2];
+ int to = match[capture * 2 + 1];
+ if (from >= 0 && to > from) {
+ builder->AddSubjectSlice(from, to);
+ }
+ break;
+ }
+ case REPLACEMENT_SUBSTRING:
+ case REPLACEMENT_STRING:
+ builder->AddString(replacement_substrings_[part.data]);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+
+void FindOneByteStringIndices(Vector<const uint8_t> subject, char pattern,
+ ZoneList<int>* indices, unsigned int limit,
+ Zone* zone) {
+ DCHECK(limit > 0);
+ // Collect indices of pattern in subject using memchr.
+ // Stop after finding at most limit values.
+ const uint8_t* subject_start = subject.start();
+ const uint8_t* subject_end = subject_start + subject.length();
+ const uint8_t* pos = subject_start;
+ while (limit > 0) {
+ pos = reinterpret_cast<const uint8_t*>(
+ memchr(pos, pattern, subject_end - pos));
+ if (pos == NULL) return;
+ indices->Add(static_cast<int>(pos - subject_start), zone);
+ pos++;
+ limit--;
+ }
+}
+
+
+void FindTwoByteStringIndices(const Vector<const uc16> subject, uc16 pattern,
+ ZoneList<int>* indices, unsigned int limit,
+ Zone* zone) {
+ DCHECK(limit > 0);
+ const uc16* subject_start = subject.start();
+ const uc16* subject_end = subject_start + subject.length();
+ for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
+ if (*pos == pattern) {
+ indices->Add(static_cast<int>(pos - subject_start), zone);
+ limit--;
+ }
+ }
+}
+
+
+template <typename SubjectChar, typename PatternChar>
+void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
+ ZoneList<int>* indices, unsigned int limit, Zone* zone) {
+ DCHECK(limit > 0);
+ // Collect indices of pattern in subject.
+ // Stop after finding at most limit values.
+ int pattern_length = pattern.length();
+ int index = 0;
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
+ while (limit > 0) {
+ index = search.Search(subject, index);
+ if (index < 0) return;
+ indices->Add(index, zone);
+ index += pattern_length;
+ limit--;
+ }
+}
+
+
+void FindStringIndicesDispatch(Isolate* isolate, String* subject,
+ String* pattern, ZoneList<int>* indices,
+ unsigned int limit, Zone* zone) {
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent subject_content = subject->GetFlatContent();
+ String::FlatContent pattern_content = pattern->GetFlatContent();
+ DCHECK(subject_content.IsFlat());
+ DCHECK(pattern_content.IsFlat());
+ if (subject_content.IsOneByte()) {
+ Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
+ if (pattern_content.IsOneByte()) {
+ Vector<const uint8_t> pattern_vector =
+ pattern_content.ToOneByteVector();
+ if (pattern_vector.length() == 1) {
+ FindOneByteStringIndices(subject_vector, pattern_vector[0], indices,
+ limit, zone);
+ } else {
+ FindStringIndices(isolate, subject_vector, pattern_vector, indices,
+ limit, zone);
+ }
+ } else {
+ FindStringIndices(isolate, subject_vector,
+ pattern_content.ToUC16Vector(), indices, limit, zone);
+ }
+ } else {
+ Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
+ if (pattern_content.IsOneByte()) {
+ Vector<const uint8_t> pattern_vector =
+ pattern_content.ToOneByteVector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector, pattern_vector[0], indices,
+ limit, zone);
+ } else {
+ FindStringIndices(isolate, subject_vector, pattern_vector, indices,
+ limit, zone);
+ }
+ } else {
+ Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector, pattern_vector[0], indices,
+ limit, zone);
+ } else {
+ FindStringIndices(isolate, subject_vector, pattern_vector, indices,
+ limit, zone);
+ }
+ }
+ }
+ }
+}
+
+
+template <typename ResultSeqString>
+MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
+ Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp,
+ Handle<String> replacement, Handle<JSArray> last_match_info) {
+ DCHECK(subject->IsFlat());
+ DCHECK(replacement->IsFlat());
+
+ ZoneScope zone_scope(isolate->runtime_zone());
+ ZoneList<int> indices(8, zone_scope.zone());
+ DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
+ String* pattern =
+ String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ int subject_len = subject->length();
+ int pattern_len = pattern->length();
+ int replacement_len = replacement->length();
+
+ FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff,
+ zone_scope.zone());
+
+ int matches = indices.length();
+ if (matches == 0) return *subject;
+
+ // Detect integer overflow.
+ int64_t result_len_64 = (static_cast<int64_t>(replacement_len) -
+ static_cast<int64_t>(pattern_len)) *
+ static_cast<int64_t>(matches) +
+ static_cast<int64_t>(subject_len);
+ int result_len;
+ if (result_len_64 > static_cast<int64_t>(String::kMaxLength)) {
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ result_len = kMaxInt; // Provoke exception.
+ } else {
+ result_len = static_cast<int>(result_len_64);
+ }
+
+ int subject_pos = 0;
+ int result_pos = 0;
+
+ MaybeHandle<SeqString> maybe_res;
+ if (ResultSeqString::kHasOneByteEncoding) {
+ maybe_res = isolate->factory()->NewRawOneByteString(result_len);
+ } else {
+ maybe_res = isolate->factory()->NewRawTwoByteString(result_len);
+ }
+ Handle<SeqString> untyped_res;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, untyped_res, maybe_res);
+ Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(untyped_res);
+
+ for (int i = 0; i < matches; i++) {
+ // Copy non-matched subject content.
+ if (subject_pos < indices.at(i)) {
+ String::WriteToFlat(*subject, result->GetChars() + result_pos,
+ subject_pos, indices.at(i));
+ result_pos += indices.at(i) - subject_pos;
+ }
+
+ // Replace match.
+ if (replacement_len > 0) {
+ String::WriteToFlat(*replacement, result->GetChars() + result_pos, 0,
+ replacement_len);
+ result_pos += replacement_len;
+ }
+
+ subject_pos = indices.at(i) + pattern_len;
+ }
+ // Add remaining subject content at the end.
+ if (subject_pos < subject_len) {
+ String::WriteToFlat(*subject, result->GetChars() + result_pos, subject_pos,
+ subject_len);
+ }
+
+ int32_t match_indices[] = {indices.at(matches - 1),
+ indices.at(matches - 1) + pattern_len};
+ RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
+
+ return *result;
+}
+
+
+MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
+ Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
+ Handle<String> replacement, Handle<JSArray> last_match_info) {
+ DCHECK(subject->IsFlat());
+ DCHECK(replacement->IsFlat());
+
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
+
+ // CompiledReplacement uses zone allocation.
+ ZoneScope zone_scope(isolate->runtime_zone());
+ CompiledReplacement compiled_replacement(zone_scope.zone());
+ bool simple_replace =
+ compiled_replacement.Compile(replacement, capture_count, subject_length);
+
+ // Shortcut for simple non-regexp global replacements
+ if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
+ if (subject->HasOnlyOneByteChars() && replacement->HasOnlyOneByteChars()) {
+ return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
+ isolate, subject, regexp, replacement, last_match_info);
+ } else {
+ return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
+ isolate, subject, regexp, replacement, last_match_info);
+ }
+ }
+
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) {
+ if (global_cache.HasException()) return isolate->heap()->exception();
+ return *subject;
+ }
+
+ // Guessing the number of parts that the final result string is built
+ // from. Global regexps can match any number of times, so we guess
+ // conservatively.
+ int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
+ ReplacementStringBuilder builder(isolate->heap(), subject, expected_parts);
+
+ // Number of parts added by compiled replacement plus preceeding
+ // string and possibly suffix after last match. It is possible for
+ // all components to use two elements when encoded as two smis.
+ const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
+
+ int prev = 0;
+
+ do {
+ builder.EnsureCapacity(parts_added_per_loop);
+
+ int start = current_match[0];
+ int end = current_match[1];
+
+ if (prev < start) {
+ builder.AddSubjectSlice(prev, start);
+ }
+
+ if (simple_replace) {
+ builder.AddString(replacement);
+ } else {
+ compiled_replacement.Apply(&builder, start, end, current_match);
+ }
+ prev = end;
+
+ current_match = global_cache.FetchNext();
+ } while (current_match != NULL);
+
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ if (prev < subject_length) {
+ builder.EnsureCapacity(2);
+ builder.AddSubjectSlice(prev, subject_length);
+ }
+
+ RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
+
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.ToString());
+ return *result;
+}
+
+
+template <typename ResultSeqString>
+MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
+ Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
+ Handle<JSArray> last_match_info) {
+ DCHECK(subject->IsFlat());
+
+ // Shortcut for simple non-regexp global replacements
+ if (regexp->TypeTag() == JSRegExp::ATOM) {
+ Handle<String> empty_string = isolate->factory()->empty_string();
+ if (subject->IsOneByteRepresentation()) {
+ return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
+ isolate, subject, regexp, empty_string, last_match_info);
+ } else {
+ return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
+ isolate, subject, regexp, empty_string, last_match_info);
+ }
+ }
+
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) {
+ if (global_cache.HasException()) return isolate->heap()->exception();
+ return *subject;
+ }
+
+ int start = current_match[0];
+ int end = current_match[1];
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
+
+ int new_length = subject_length - (end - start);
+ if (new_length == 0) return isolate->heap()->empty_string();
+
+ Handle<ResultSeqString> answer;
+ if (ResultSeqString::kHasOneByteEncoding) {
+ answer = Handle<ResultSeqString>::cast(
+ isolate->factory()->NewRawOneByteString(new_length).ToHandleChecked());
+ } else {
+ answer = Handle<ResultSeqString>::cast(
+ isolate->factory()->NewRawTwoByteString(new_length).ToHandleChecked());
+ }
+
+ int prev = 0;
+ int position = 0;
+
+ do {
+ start = current_match[0];
+ end = current_match[1];
+ if (prev < start) {
+ // Add substring subject[prev;start] to answer string.
+ String::WriteToFlat(*subject, answer->GetChars() + position, prev, start);
+ position += start - prev;
+ }
+ prev = end;
+
+ current_match = global_cache.FetchNext();
+ } while (current_match != NULL);
+
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
+
+ if (prev < subject_length) {
+ // Add substring subject[prev;length] to answer string.
+ String::WriteToFlat(*subject, answer->GetChars() + position, prev,
+ subject_length);
+ position += subject_length - prev;
+ }
+
+ if (position == 0) return isolate->heap()->empty_string();
+
+ // Shorten string and fill
+ int string_size = ResultSeqString::SizeFor(position);
+ int allocated_string_size = ResultSeqString::SizeFor(new_length);
+ int delta = allocated_string_size - string_size;
+
+ answer->set_length(position);
+ if (delta == 0) return *answer;
+
+ Address end_of_string = answer->address() + string_size;
+ Heap* heap = isolate->heap();
+
+ // The trimming is performed on a newly allocated object, which is on a
+ // fresly allocated page or on an already swept page. Hence, the sweeper
+ // thread can not get confused with the filler creation. No synchronization
+ // needed.
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR);
+ return *answer;
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
+
+ RUNTIME_ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
+
+ subject = String::Flatten(subject);
+
+ if (replacement->length() == 0) {
+ if (subject->HasOnlyOneByteChars()) {
+ return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
+ isolate, subject, regexp, last_match_info);
+ } else {
+ return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
+ isolate, subject, regexp, last_match_info);
+ }
+ }
+
+ replacement = String::Flatten(replacement);
+
+ return StringReplaceGlobalRegExpWithString(isolate, subject, regexp,
+ replacement, last_match_info);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringSplit) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
+ CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
+ RUNTIME_ASSERT(limit > 0);
+
+ int subject_length = subject->length();
+ int pattern_length = pattern->length();
+ RUNTIME_ASSERT(pattern_length > 0);
+
+ if (limit == 0xffffffffu) {
+ Handle<Object> cached_answer(
+ RegExpResultsCache::Lookup(isolate->heap(), *subject, *pattern,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
+ isolate);
+ if (*cached_answer != Smi::FromInt(0)) {
+ // The cache FixedArray is a COW-array and can therefore be reused.
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
+ Handle<FixedArray>::cast(cached_answer));
+ return *result;
+ }
+ }
+
+ // The limit can be very large (0xffffffffu), but since the pattern
+ // isn't empty, we can never create more parts than ~half the length
+ // of the subject.
+
+ subject = String::Flatten(subject);
+ pattern = String::Flatten(pattern);
+
+ static const int kMaxInitialListCapacity = 16;
+
+ ZoneScope zone_scope(isolate->runtime_zone());
+
+ // Find (up to limit) indices of separator and end-of-string in subject
+ int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
+ ZoneList<int> indices(initial_capacity, zone_scope.zone());
+
+ FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit,
+ zone_scope.zone());
+
+ if (static_cast<uint32_t>(indices.length()) < limit) {
+ indices.Add(subject_length, zone_scope.zone());
+ }
+
+ // The list indices now contains the end of each part to create.
+
+ // Create JSArray of substrings separated by separator.
+ int part_count = indices.length();
+
+ Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
+ JSObject::EnsureCanContainHeapObjectElements(result);
+ result->set_length(Smi::FromInt(part_count));
+
+ DCHECK(result->HasFastObjectElements());
+
+ if (part_count == 1 && indices.at(0) == subject_length) {
+ FixedArray::cast(result->elements())->set(0, *subject);
+ return *result;
+ }
+
+ Handle<FixedArray> elements(FixedArray::cast(result->elements()));
+ int part_start = 0;
+ for (int i = 0; i < part_count; i++) {
+ HandleScope local_loop_handle(isolate);
+ int part_end = indices.at(i);
+ Handle<String> substring =
+ isolate->factory()->NewProperSubString(subject, part_start, part_end);
+ elements->set(i, *substring);
+ part_start = part_end + pattern_length;
+ }
+
+ if (limit == 0xffffffffu) {
+ if (result->HasFastObjectElements()) {
+ RegExpResultsCache::Enter(isolate, subject, pattern, elements,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
+ }
+ }
+
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_RegExpCompile) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ RegExpImpl::Compile(re, pattern, flags));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_RegExpExecRT) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_INT32_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
+ // Due to the way the JS calls are constructed this must be less than the
+ // length of a string, i.e. it is always a Smi. We check anyway for security.
+ RUNTIME_ASSERT(index >= 0);
+ RUNTIME_ASSERT(index <= subject->length());
+ isolate->counters()->regexp_entry_runtime()->Increment();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ RegExpImpl::Exec(regexp, subject, index, last_match_info));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_SMI_ARG_CHECKED(size, 0);
+ RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength);
+ CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
+ Handle<Map> regexp_map(isolate->native_context()->regexp_result_map());
+ Handle<JSObject> object =
+ isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED, false);
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ array->set_elements(*elements);
+ array->set_length(Smi::FromInt(size));
+ // Write in-object properties after the length of the array.
+ array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, *index);
+ array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, *input);
+ return *array;
+}
+
+
+RUNTIME_FUNCTION(Runtime_RegExpInitializeObject) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 6);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+ // If source is the empty string we set it to "(?:)" instead as
+ // suggested by ECMA-262, 5th, section 15.10.4.1.
+ if (source->length() == 0) source = isolate->factory()->query_colon_string();
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, global, 2);
+ if (!global->IsTrue()) global = isolate->factory()->false_value();
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, ignoreCase, 3);
+ if (!ignoreCase->IsTrue()) ignoreCase = isolate->factory()->false_value();
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
+ if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, sticky, 5);
+ if (!sticky->IsTrue()) sticky = isolate->factory()->false_value();
+
+ Map* map = regexp->map();
+ Object* constructor = map->constructor();
+ if (!FLAG_harmony_regexps && constructor->IsJSFunction() &&
+ JSFunction::cast(constructor)->initial_map() == map) {
+ // If we still have the original map, set in-object properties directly.
+ regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
+ // Both true and false are immovable immortal objects so no need for write
+ // barrier.
+ regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, *global,
+ SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, *ignoreCase,
+ SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, *multiline,
+ SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ return *regexp;
+ }
+
+ // Map has changed, so use generic, but slower, method. We also end here if
+ // the --harmony-regexp flag is set, because the initial map does not have
+ // space for the 'sticky' flag, since it is from the snapshot, but must work
+ // both with and without --harmony-regexp. When sticky comes out from under
+ // the flag, we will be able to use the fast initial map.
+ PropertyAttributes final =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ Handle<Object> zero(Smi::FromInt(0), isolate);
+ Factory* factory = isolate->factory();
+ JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->source_string(),
+ source, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->global_string(),
+ global, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->ignore_case_string(), ignoreCase, final).Check();
+ JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->multiline_string(),
+ multiline, final).Check();
+ if (FLAG_harmony_regexps) {
+ JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->sticky_string(),
+ sticky, final).Check();
+ }
+ JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->last_index_string(),
+ zero, writable).Check();
+ return *regexp;
+}
+
+
+RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
+
+ // Get the RegExp function from the context in the literals array.
+ // This is the RegExp function from the context in which the
+ // function was created. We do not use the RegExp function from the
+ // current native context because this might be the RegExp function
+ // from another context which we should not have access to.
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
+ // Compute the regular expression literal.
+ Handle<Object> regexp;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, regexp,
+ RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags));
+ literals->set(index, *regexp);
+ return *regexp;
+}
+
+
+// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
+// separate last match info. See comment on that function.
+template <bool has_capture>
+static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<JSArray> last_match_array,
+ Handle<JSArray> result_array) {
+ DCHECK(subject->IsFlat());
+ DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
+
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
+
+ static const int kMinLengthToCache = 0x1000;
+
+ if (subject_length > kMinLengthToCache) {
+ Handle<Object> cached_answer(
+ RegExpResultsCache::Lookup(isolate->heap(), *subject, regexp->data(),
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES),
+ isolate);
+ if (*cached_answer != Smi::FromInt(0)) {
+ Handle<FixedArray> cached_fixed_array =
+ Handle<FixedArray>(FixedArray::cast(*cached_answer));
+ // The cache FixedArray is a COW-array and can therefore be reused.
+ JSArray::SetContent(result_array, cached_fixed_array);
+ // The actual length of the result array is stored in the last element of
+ // the backing store (the backing FixedArray may have a larger capacity).
+ Object* cached_fixed_array_last_element =
+ cached_fixed_array->get(cached_fixed_array->length() - 1);
+ Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
+ result_array->set_length(js_array_length);
+ RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
+ NULL);
+ return *result_array;
+ }
+ }
+
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ // Ensured in Runtime_RegExpExecMultiple.
+ DCHECK(result_array->HasFastObjectElements());
+ Handle<FixedArray> result_elements(
+ FixedArray::cast(result_array->elements()));
+ if (result_elements->length() < 16) {
+ result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
+ }
+
+ FixedArrayBuilder builder(result_elements);
+
+ // Position to search from.
+ int match_start = -1;
+ int match_end = 0;
+ bool first = true;
+
+ // Two smis before and after the match, for very long strings.
+ static const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+ while (true) {
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) break;
+ match_start = current_match[0];
+ builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(&builder, match_end,
+ match_start);
+ }
+ match_end = current_match[1];
+ {
+ // Avoid accumulating new handles inside loop.
+ HandleScope temp_scope(isolate);
+ Handle<String> match;
+ if (!first) {
+ match = isolate->factory()->NewProperSubString(subject, match_start,
+ match_end);
+ } else {
+ match =
+ isolate->factory()->NewSubString(subject, match_start, match_end);
+ first = false;
+ }
+
+ if (has_capture) {
+ // Arguments array to replace function is match, captures, index and
+ // subject, i.e., 3 + capture count in total.
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(3 + capture_count);
+
+ elements->set(0, *match);
+ for (int i = 1; i <= capture_count; i++) {
+ int start = current_match[i * 2];
+ if (start >= 0) {
+ int end = current_match[i * 2 + 1];
+ DCHECK(start <= end);
+ Handle<String> substring =
+ isolate->factory()->NewSubString(subject, start, end);
+ elements->set(i, *substring);
+ } else {
+ DCHECK(current_match[i * 2 + 1] < 0);
+ elements->set(i, isolate->heap()->undefined_value());
+ }
+ }
+ elements->set(capture_count + 1, Smi::FromInt(match_start));
+ elements->set(capture_count + 2, *subject);
+ builder.Add(*isolate->factory()->NewJSArrayWithElements(elements));
+ } else {
+ builder.Add(*match);
+ }
+ }
+ }
+
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ if (match_start >= 0) {
+ // Finished matching, with at least one match.
+ if (match_end < subject_length) {
+ ReplacementStringBuilder::AddSubjectSlice(&builder, match_end,
+ subject_length);
+ }
+
+ RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
+ NULL);
+
+ if (subject_length > kMinLengthToCache) {
+ // Store the length of the result array into the last element of the
+ // backing FixedArray.
+ builder.EnsureCapacity(1);
+ Handle<FixedArray> fixed_array = builder.array();
+ fixed_array->set(fixed_array->length() - 1,
+ Smi::FromInt(builder.length()));
+ // Cache the result and turn the FixedArray into a COW array.
+ RegExpResultsCache::Enter(isolate, subject,
+ handle(regexp->data(), isolate), fixed_array,
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ }
+ return *builder.ToJSArray(result_array);
+ } else {
+ return isolate->heap()->null_value(); // No matches at all.
+ }
+}
+
+
+// This is only called for StringReplaceGlobalRegExpWithFunction. This sets
+// lastMatchInfoOverride to maintain the last match info, so we don't need to
+// set any other last match array info.
+RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
+ HandleScope handles(isolate);
+ DCHECK(args.length() == 4);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
+ RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
+ RUNTIME_ASSERT(result_array->HasFastObjectElements());
+
+ subject = String::Flatten(subject);
+ RUNTIME_ASSERT(regexp->GetFlags().is_global());
+
+ if (regexp->CaptureCount() == 0) {
+ return SearchRegExpMultiple<false>(isolate, subject, regexp,
+ last_match_info, result_array);
+ } else {
+ return SearchRegExpMultiple<true>(isolate, subject, regexp, last_match_info,
+ result_array);
+ }
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_RegExpConstructResult) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_RegExpConstructResult(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_RegExpExec) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_RegExpExecRT(args, isolate);
+}
+
+
+// Perform string match of pattern on subject, starting at start index.
+// Caller must ensure that 0 <= start_index <= sub->length(),
+// and should check that pat->length() + start_index <= sub->length().
+int Runtime::StringMatch(Isolate* isolate, Handle<String> sub,
+ Handle<String> pat, int start_index) {
+ DCHECK(0 <= start_index);
+ DCHECK(start_index <= sub->length());
+
+ int pattern_length = pat->length();
+ if (pattern_length == 0) return start_index;
+
+ int subject_length = sub->length();
+ if (start_index + pattern_length > subject_length) return -1;
+
+ sub = String::Flatten(sub);
+ pat = String::Flatten(pat);
+
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+ // Extract flattened substrings of cons strings before getting encoding.
+ String::FlatContent seq_sub = sub->GetFlatContent();
+ String::FlatContent seq_pat = pat->GetFlatContent();
+
+ // dispatch on type of strings
+ if (seq_pat.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
+ if (seq_sub.IsOneByte()) {
+ return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
+ start_index);
+ }
+ return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector,
+ start_index);
+ }
+ Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
+ if (seq_sub.IsOneByte()) {
+ return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
+ start_index);
+ }
+ return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
new file mode 100644
index 0000000000..82174e96bc
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -0,0 +1,1260 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/jsregexp-inl.h"
+#include "src/jsregexp.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/runtime/string-builder.h"
+#include "src/string-search.h"
+
+namespace v8 {
+namespace internal {
+
+
+// This may return an empty MaybeHandle if an exception is thrown or
+// we abort due to reaching the recursion limit.
+MaybeHandle<String> StringReplaceOneCharWithString(
+ Isolate* isolate, Handle<String> subject, Handle<String> search,
+ Handle<String> replace, bool* found, int recursion_limit) {
+ StackLimitCheck stackLimitCheck(isolate);
+ if (stackLimitCheck.HasOverflowed() || (recursion_limit == 0)) {
+ return MaybeHandle<String>();
+ }
+ recursion_limit--;
+ if (subject->IsConsString()) {
+ ConsString* cons = ConsString::cast(*subject);
+ Handle<String> first = Handle<String>(cons->first());
+ Handle<String> second = Handle<String>(cons->second());
+ Handle<String> new_first;
+ if (!StringReplaceOneCharWithString(isolate, first, search, replace, found,
+ recursion_limit).ToHandle(&new_first)) {
+ return MaybeHandle<String>();
+ }
+ if (*found) return isolate->factory()->NewConsString(new_first, second);
+
+ Handle<String> new_second;
+ if (!StringReplaceOneCharWithString(isolate, second, search, replace, found,
+ recursion_limit)
+ .ToHandle(&new_second)) {
+ return MaybeHandle<String>();
+ }
+ if (*found) return isolate->factory()->NewConsString(first, new_second);
+
+ return subject;
+ } else {
+ int index = Runtime::StringMatch(isolate, subject, search, 0);
+ if (index == -1) return subject;
+ *found = true;
+ Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
+ Handle<String> cons1;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, cons1, isolate->factory()->NewConsString(first, replace),
+ String);
+ Handle<String> second =
+ isolate->factory()->NewSubString(subject, index + 1, subject->length());
+ return isolate->factory()->NewConsString(cons1, second);
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
+
+ // If the cons string tree is too deep, we simply abort the recursion and
+ // retry with a flattened subject string.
+ const int kRecursionLimit = 0x1000;
+ bool found = false;
+ Handle<String> result;
+ if (StringReplaceOneCharWithString(isolate, subject, search, replace, &found,
+ kRecursionLimit).ToHandle(&result)) {
+ return *result;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+
+ subject = String::Flatten(subject);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ StringReplaceOneCharWithString(isolate, subject, search, replace, &found,
+ kRecursionLimit));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringIndexOf) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
+
+ uint32_t start_index;
+ if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
+
+ RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
+ int position = Runtime::StringMatch(isolate, sub, pat, start_index);
+ return Smi::FromInt(position);
+}
+
+
+template <typename schar, typename pchar>
+static int StringMatchBackwards(Vector<const schar> subject,
+ Vector<const pchar> pattern, int idx) {
+ int pattern_length = pattern.length();
+ DCHECK(pattern_length >= 1);
+ DCHECK(idx + pattern_length <= subject.length());
+
+ if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
+ for (int i = 0; i < pattern_length; i++) {
+ uc16 c = pattern[i];
+ if (c > String::kMaxOneByteCharCode) {
+ return -1;
+ }
+ }
+ }
+
+ pchar pattern_first_char = pattern[0];
+ for (int i = idx; i >= 0; i--) {
+ if (subject[i] != pattern_first_char) continue;
+ int j = 1;
+ while (j < pattern_length) {
+ if (pattern[j] != subject[i + j]) {
+ break;
+ }
+ j++;
+ }
+ if (j == pattern_length) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
+
+ uint32_t start_index;
+ if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
+
+ uint32_t pat_length = pat->length();
+ uint32_t sub_length = sub->length();
+
+ if (start_index + pat_length > sub_length) {
+ start_index = sub_length - pat_length;
+ }
+
+ if (pat_length == 0) {
+ return Smi::FromInt(start_index);
+ }
+
+ sub = String::Flatten(sub);
+ pat = String::Flatten(pat);
+
+ int position = -1;
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+
+ String::FlatContent sub_content = sub->GetFlatContent();
+ String::FlatContent pat_content = pat->GetFlatContent();
+
+ if (pat_content.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
+ if (sub_content.IsOneByte()) {
+ position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector,
+ start_index);
+ } else {
+ position = StringMatchBackwards(sub_content.ToUC16Vector(), pat_vector,
+ start_index);
+ }
+ } else {
+ Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
+ if (sub_content.IsOneByte()) {
+ position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector,
+ start_index);
+ } else {
+ position = StringMatchBackwards(sub_content.ToUC16Vector(), pat_vector,
+ start_index);
+ }
+ }
+
+ return Smi::FromInt(position);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
+
+ if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal.
+ int str1_length = str1->length();
+ int str2_length = str2->length();
+
+ // Decide trivial cases without flattening.
+ if (str1_length == 0) {
+ if (str2_length == 0) return Smi::FromInt(0); // Equal.
+ return Smi::FromInt(-str2_length);
+ } else {
+ if (str2_length == 0) return Smi::FromInt(str1_length);
+ }
+
+ int end = str1_length < str2_length ? str1_length : str2_length;
+
+ // No need to flatten if we are going to find the answer on the first
+ // character. At this point we know there is at least one character
+ // in each string, due to the trivial case handling above.
+ int d = str1->Get(0) - str2->Get(0);
+ if (d != 0) return Smi::FromInt(d);
+
+ str1 = String::Flatten(str1);
+ str2 = String::Flatten(str2);
+
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat1 = str1->GetFlatContent();
+ String::FlatContent flat2 = str2->GetFlatContent();
+
+ for (int i = 0; i < end; i++) {
+ if (flat1.Get(i) != flat2.Get(i)) {
+ return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
+ }
+ }
+
+ return Smi::FromInt(str1_length - str2_length);
+}
+
+
+RUNTIME_FUNCTION(Runtime_SubString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ int start, end;
+ // We have a fast integer-only case here to avoid a conversion to double in
+ // the common case where from and to are Smis.
+ if (args[1]->IsSmi() && args[2]->IsSmi()) {
+ CONVERT_SMI_ARG_CHECKED(from_number, 1);
+ CONVERT_SMI_ARG_CHECKED(to_number, 2);
+ start = from_number;
+ end = to_number;
+ } else {
+ CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
+ CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
+ start = FastD2IChecked(from_number);
+ end = FastD2IChecked(to_number);
+ }
+ RUNTIME_ASSERT(end >= start);
+ RUNTIME_ASSERT(start >= 0);
+ RUNTIME_ASSERT(end <= string->length());
+ isolate->counters()->sub_string_runtime()->Increment();
+
+ return *isolate->factory()->NewSubString(string, start, end);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringAdd) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
+ isolate->counters()->string_add_runtime()->Increment();
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewConsString(str1, str2));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_InternalizeString) {
+ HandleScope handles(isolate);
+ RUNTIME_ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ return *isolate->factory()->InternalizeString(string);
+}
+
+
+// %StringMatch(subject, regexp, regexp_info): global-regexp branch of
+// String.prototype.match.  Returns null if there is no match at all,
+// otherwise a JSArray of every matched substring; as a side effect the
+// lastMatchInfo array (regexp_info) is updated with the final successful
+// match.  Regexp exceptions are propagated via heap()->exception().
+RUNTIME_FUNCTION(Runtime_StringMatch) {
+ HandleScope handles(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
+
+ RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
+
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ int capture_count = regexp->CaptureCount();
+
+ // Collect (start, end) offset pairs for every match into a zone list.
+ ZoneScope zone_scope(isolate->runtime_zone());
+ ZoneList<int> offsets(8, zone_scope.zone());
+
+ while (true) {
+ int32_t* match = global_cache.FetchNext();
+ if (match == NULL) break;
+ offsets.Add(match[0], zone_scope.zone()); // start
+ offsets.Add(match[1], zone_scope.zone()); // end
+ }
+
+ if (global_cache.HasException()) return isolate->heap()->exception();
+
+ if (offsets.length() == 0) {
+ // Not a single match.
+ return isolate->heap()->null_value();
+ }
+
+ RegExpImpl::SetLastMatchInfo(regexp_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
+
+ // Materialize one substring per match; each pair of offsets becomes one
+ // array element.  A temporary HandleScope per iteration keeps the number
+ // of live handles bounded.
+ int matches = offsets.length() / 2;
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
+ Handle<String> substring =
+ isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
+ elements->set(0, *substring);
+ for (int i = 1; i < matches; i++) {
+ HandleScope temp_scope(isolate);
+ int from = offsets.at(i * 2);
+ int to = offsets.at(i * 2 + 1);
+ Handle<String> substring =
+ isolate->factory()->NewProperSubString(subject, from, to);
+ elements->set(i, *substring);
+ }
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
+ result->set_length(Smi::FromInt(matches));
+ return *result;
+}
+
+
+// %StringCharCodeAtRT(subject, index): returns the char code at the given
+// index as a Smi, or NaN when the index is out of range (this NaN is what
+// RuntimeReference_StringCharAt below keys off to produce "").
+RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
+
+ // Flatten the string. If someone wants to get a char at an index
+ // in a cons string, it is likely that more indices will be
+ // accessed.
+ subject = String::Flatten(subject);
+
+ if (i >= static_cast<uint32_t>(subject->length())) {
+ return isolate->heap()->nan_value();
+ }
+
+ return Smi::FromInt(subject->Get(i));
+}
+
+
+// %CharFromCode(code): returns the one-character string for the given char
+// code, truncated to 16 bits (BMP code units only).  A non-number argument
+// yields the empty string rather than an error.
+RUNTIME_FUNCTION(Runtime_CharFromCode) {
+ HandleScope handlescope(isolate);
+ DCHECK(args.length() == 1);
+ if (args[0]->IsNumber()) {
+ CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
+ code &= 0xffff; // keep only the low 16 bits: one UTF-16 code unit
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+ }
+ return isolate->heap()->empty_string();
+}
+
+
+// %StringCompare(x, y): three-way lexicographic comparison by UTF-16 code
+// units.  Returns Smi LESS, EQUAL or GREATER.  Fast paths: identical
+// handles, empty strings and differing first characters; otherwise both
+// strings are flattened and their common prefix is compared, with the
+// length difference breaking ties.
+RUNTIME_FUNCTION(Runtime_StringCompare) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+
+ isolate->counters()->string_compare_runtime()->Increment();
+
+ // A few fast case tests before we flatten.
+ if (x.is_identical_to(y)) return Smi::FromInt(EQUAL);
+ if (y->length() == 0) {
+ if (x->length() == 0) return Smi::FromInt(EQUAL);
+ return Smi::FromInt(GREATER);
+ } else if (x->length() == 0) {
+ return Smi::FromInt(LESS);
+ }
+
+ // Differing first characters decide the order without flattening.
+ int d = x->Get(0) - y->Get(0);
+ if (d < 0)
+ return Smi::FromInt(LESS);
+ else if (d > 0)
+ return Smi::FromInt(GREATER);
+
+ // Slow case.
+ x = String::Flatten(x);
+ y = String::Flatten(y);
+
+ DisallowHeapAllocation no_gc;
+ // If the shared prefix compares equal, the shorter string sorts first.
+ Object* equal_prefix_result = Smi::FromInt(EQUAL);
+ int prefix_length = x->length();
+ if (y->length() < prefix_length) {
+ prefix_length = y->length();
+ equal_prefix_result = Smi::FromInt(GREATER);
+ } else if (y->length() > prefix_length) {
+ equal_prefix_result = Smi::FromInt(LESS);
+ }
+ int r;
+ // Dispatch on the four one-byte/two-byte representation combinations.
+ String::FlatContent x_content = x->GetFlatContent();
+ String::FlatContent y_content = y->GetFlatContent();
+ if (x_content.IsOneByte()) {
+ Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
+ if (y_content.IsOneByte()) {
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y_content.ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ } else {
+ Vector<const uc16> x_chars = x_content.ToUC16Vector();
+ if (y_content.IsOneByte()) {
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y_content.ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ }
+ Object* result;
+ if (r == 0) {
+ result = equal_prefix_result;
+ } else {
+ result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
+ }
+ return result;
+}
+
+
+// %StringBuilderConcat(array, array_length, special): concatenates the
+// parts encoded in |array| into a single flat string.  Elements are either
+// strings or Smi-encoded slices of |special| (see the slice-encoding
+// assumption below); StringBuilderConcatLength decodes them and computes
+// the total length and one-byte-ness.  Throws for invalid-length results
+// and illegal element types.
+RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ int32_t array_length;
+ if (!args[1]->ToInt32(&array_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
+ }
+ CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
+
+ size_t actual_array_length = 0;
+ RUNTIME_ASSERT(
+ TryNumberToSize(isolate, array->length(), &actual_array_length));
+ RUNTIME_ASSERT(array_length >= 0);
+ RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length);
+
+ // This assumption is used by the slice encoding in one or two smis.
+ DCHECK(Smi::kMaxValue >= String::kMaxLength);
+
+ RUNTIME_ASSERT(array->HasFastElements());
+ JSObject::EnsureCanContainHeapObjectElements(array);
+
+ int special_length = special->length();
+ if (!array->HasFastObjectElements()) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ int length;
+ bool one_byte = special->HasOnlyOneByteChars();
+
+ {
+ // Raw FixedArray* must not survive an allocation; keep this scope GC-free.
+ DisallowHeapAllocation no_gc;
+ FixedArray* fixed_array = FixedArray::cast(array->elements());
+ if (fixed_array->length() < array_length) {
+ array_length = fixed_array->length();
+ }
+
+ if (array_length == 0) {
+ return isolate->heap()->empty_string();
+ } else if (array_length == 1) {
+ Object* first = fixed_array->get(0);
+ if (first->IsString()) return first;
+ }
+ length = StringBuilderConcatLength(special_length, fixed_array,
+ array_length, &one_byte);
+ }
+
+ // StringBuilderConcatLength signals malformed input with -1.
+ if (length == -1) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ if (one_byte) {
+ Handle<SeqOneByteString> answer;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, answer, isolate->factory()->NewRawOneByteString(length));
+ StringBuilderConcatHelper(*special, answer->GetChars(),
+ FixedArray::cast(array->elements()),
+ array_length);
+ return *answer;
+ } else {
+ Handle<SeqTwoByteString> answer;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, answer, isolate->factory()->NewRawTwoByteString(length));
+ StringBuilderConcatHelper(*special, answer->GetChars(),
+ FixedArray::cast(array->elements()),
+ array_length);
+ return *answer;
+ }
+}
+
+
+// %StringBuilderJoin(array, array_length, separator): joins an array whose
+// elements are all strings with a non-empty separator, always producing a
+// two-byte result (the one-byte case is handled by %_FastOneByteArrayJoin,
+// see the DCHECK at the end).  Throws InvalidStringLength when the result
+// would exceed String::kMaxLength.
+RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ int32_t array_length;
+ if (!args[1]->ToInt32(&array_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
+ }
+ CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
+ RUNTIME_ASSERT(array->HasFastObjectElements());
+ RUNTIME_ASSERT(array_length >= 0);
+
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
+ if (fixed_array->length() < array_length) {
+ array_length = fixed_array->length();
+ }
+
+ if (array_length == 0) {
+ return isolate->heap()->empty_string();
+ } else if (array_length == 1) {
+ Object* first = fixed_array->get(0);
+ RUNTIME_ASSERT(first->IsString());
+ return first;
+ }
+
+ int separator_length = separator->length();
+ RUNTIME_ASSERT(separator_length > 0);
+ // Overflow-safe upper bound on how many separators can fit into a
+ // maximal-length string.
+ int max_nof_separators =
+ (String::kMaxLength + separator_length - 1) / separator_length;
+ if (max_nof_separators < (array_length - 1)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
+ }
+ // Accumulate the total length, clamping to kMaxInt to force the
+ // invalid-length exception from NewRawTwoByteString below.
+ int length = (array_length - 1) * separator_length;
+ for (int i = 0; i < array_length; i++) {
+ Object* element_obj = fixed_array->get(i);
+ RUNTIME_ASSERT(element_obj->IsString());
+ String* element = String::cast(element_obj);
+ int increment = element->length();
+ if (increment > String::kMaxLength - length) {
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ length = kMaxInt; // Provoke exception;
+ break;
+ }
+ length += increment;
+ }
+
+ Handle<SeqTwoByteString> answer;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, answer, isolate->factory()->NewRawTwoByteString(length));
+
+ // From here on we hold raw String* pointers, so no GC may happen.
+ DisallowHeapAllocation no_gc;
+
+ uc16* sink = answer->GetChars();
+#ifdef DEBUG
+ uc16* end = sink + length;
+#endif
+
+ RUNTIME_ASSERT(fixed_array->get(0)->IsString());
+ String* first = String::cast(fixed_array->get(0));
+ String* separator_raw = *separator;
+ int first_length = first->length();
+ String::WriteToFlat(first, sink, 0, first_length);
+ sink += first_length;
+
+ for (int i = 1; i < array_length; i++) {
+ DCHECK(sink + separator_length <= end);
+ String::WriteToFlat(separator_raw, sink, 0, separator_length);
+ sink += separator_length;
+
+ RUNTIME_ASSERT(fixed_array->get(i)->IsString());
+ String* element = String::cast(fixed_array->get(i));
+ int element_length = element->length();
+ DCHECK(sink + element_length <= end);
+ String::WriteToFlat(element, sink, 0, element_length);
+ sink += element_length;
+ }
+ DCHECK(sink == end);
+
+ // Use %_FastOneByteArrayJoin instead.
+ DCHECK(!answer->IsOneByteRepresentation());
+ return *answer;
+}
+
+// Writes the join of a sparse array into |buffer|.  |elements| holds
+// alternating (index, string) pairs in increasing index order (see
+// Runtime_SparseJoinWithSeparator below); one separator is emitted per
+// skipped array index, including trailing separators up to
+// array_length - 1.  Runs entirely GC-free on raw pointers; the caller
+// must have sized |buffer| to the exact result length.
+template <typename Char>
+static void JoinSparseArrayWithSeparator(FixedArray* elements,
+ int elements_length,
+ uint32_t array_length,
+ String* separator,
+ Vector<Char> buffer) {
+ DisallowHeapAllocation no_gc;
+ int previous_separator_position = 0;
+ int separator_length = separator->length();
+ int cursor = 0;
+ for (int i = 0; i < elements_length; i += 2) {
+ int position = NumberToInt32(elements->get(i));
+ String* string = String::cast(elements->get(i + 1));
+ int string_length = string->length();
+ if (string->length() > 0) {
+ // Emit one separator for every array slot between the previous
+ // non-empty element and this one.
+ while (previous_separator_position < position) {
+ String::WriteToFlat<Char>(separator, &buffer[cursor], 0,
+ separator_length);
+ cursor += separator_length;
+ previous_separator_position++;
+ }
+ String::WriteToFlat<Char>(string, &buffer[cursor], 0, string_length);
+ cursor += string->length();
+ }
+ }
+ if (separator_length > 0) {
+ // Array length must be representable as a signed 32-bit number,
+ // otherwise the total string length would have been too large.
+ DCHECK(array_length <= 0x7fffffff); // Is int32_t.
+ int last_array_index = static_cast<int>(array_length - 1);
+ // Pad out trailing separators for empty/missing slots at the end.
+ while (previous_separator_position < last_array_index) {
+ String::WriteToFlat<Char>(separator, &buffer[cursor], 0,
+ separator_length);
+ cursor += separator_length;
+ previous_separator_position++;
+ }
+ }
+ DCHECK(cursor <= buffer.length());
+}
+
+
+// %SparseJoinWithSeparator(elements_array, array_length, separator):
+// Array.prototype.join for sparse arrays.  |elements_array| holds
+// alternating (position, string) pairs; |array_length| is the original
+// array length (used only to count separators).  Validates the input,
+// precomputes the exact result length and representation, then delegates
+// the copy to JoinSparseArrayWithSeparator.  Throws InvalidStringLength on
+// overflow (see the linked Chromium issue).
+RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
+ CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
+ // elements_array is fast-mode JSarray of alternating positions
+ // (increasing order) and strings.
+ RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
+ // array_length is length of original array (used to add separators);
+ // separator is string to put between elements. Assumed to be non-empty.
+ RUNTIME_ASSERT(array_length > 0);
+
+ // Find total length of join result.
+ int string_length = 0;
+ bool is_one_byte = separator->IsOneByteRepresentation();
+ bool overflow = false;
+ CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
+ RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
+ RUNTIME_ASSERT((elements_length & 1) == 0); // Even length.
+ FixedArray* elements = FixedArray::cast(elements_array->elements());
+ // First pass: validate the (position, string) pair structure.
+ for (int i = 0; i < elements_length; i += 2) {
+ RUNTIME_ASSERT(elements->get(i)->IsNumber());
+ CONVERT_NUMBER_CHECKED(uint32_t, position, Uint32, elements->get(i));
+ RUNTIME_ASSERT(position < array_length);
+ RUNTIME_ASSERT(elements->get(i + 1)->IsString());
+ }
+
+ {
+ // Second pass: sum element lengths and detect two-byte strings,
+ // watching for int overflow against String::kMaxLength.
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < elements_length; i += 2) {
+ String* string = String::cast(elements->get(i + 1));
+ int length = string->length();
+ if (is_one_byte && !string->IsOneByteRepresentation()) {
+ is_one_byte = false;
+ }
+ if (length > String::kMaxLength ||
+ String::kMaxLength - length < string_length) {
+ overflow = true;
+ break;
+ }
+ string_length += length;
+ }
+ }
+
+ // Account for the separators between all array_length slots.
+ int separator_length = separator->length();
+ if (!overflow && separator_length > 0) {
+ if (array_length <= 0x7fffffffu) {
+ int separator_count = static_cast<int>(array_length) - 1;
+ int remaining_length = String::kMaxLength - string_length;
+ if ((remaining_length / separator_length) >= separator_count) {
+ string_length += separator_length * (array_length - 1);
+ } else {
+ // Not room for the separators within the maximal string length.
+ overflow = true;
+ }
+ } else {
+ // Nonempty separator and at least 2^31-1 separators necessary
+ // means that the string is too large to create.
+ STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+ overflow = true;
+ }
+ }
+ if (overflow) {
+ // Throw an exception if the resulting string is too large. See
+ // https://code.google.com/p/chromium/issues/detail?id=336820
+ // for details.
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
+ }
+
+ if (is_one_byte) {
+ Handle<SeqOneByteString> result = isolate->factory()
+ ->NewRawOneByteString(string_length)
+ .ToHandleChecked();
+ JoinSparseArrayWithSeparator<uint8_t>(
+ FixedArray::cast(elements_array->elements()), elements_length,
+ array_length, *separator,
+ Vector<uint8_t>(result->GetChars(), string_length));
+ return *result;
+ } else {
+ Handle<SeqTwoByteString> result = isolate->factory()
+ ->NewRawTwoByteString(string_length)
+ .ToHandleChecked();
+ JoinSparseArrayWithSeparator<uc16>(
+ FixedArray::cast(elements_array->elements()), elements_length,
+ array_length, *separator,
+ Vector<uc16>(result->GetChars(), string_length));
+ return *result;
+ }
+}
+
+
+// Copies Latin1 characters to the given fixed array looking up
+// one-char strings in the cache. Gives up on the first char that is
+// not in the cache and fills the remainder with smi zeros. Returns
+// the length of the successfully copied prefix.
+// The Smi-zero fill keeps the array fully initialized so the GC never
+// sees garbage slots; the caller overwrites the suffix afterwards.
+static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
+ FixedArray* elements, int length) {
+ DisallowHeapAllocation no_gc;
+ FixedArray* one_byte_cache = heap->single_character_string_cache();
+ Object* undefined = heap->undefined_value();
+ int i;
+ WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
+ for (i = 0; i < length; ++i) {
+ Object* value = one_byte_cache->get(chars[i]);
+ if (value == undefined) break;
+ elements->set(i, value, mode);
+ }
+ if (i < length) {
+ // Smi zero is the all-zero bit pattern, so memset is a valid fill.
+ DCHECK(Smi::FromInt(0) == 0);
+ memset(elements->data_start() + i, 0, kPointerSize * (length - i));
+ }
+#ifdef DEBUG
+ for (int j = 0; j < length; ++j) {
+ Object* element = elements->get(j);
+ DCHECK(element == Smi::FromInt(0) ||
+ (element->IsString() && String::cast(element)->LooksValid()));
+ }
+#endif
+ return i;
+}
+
+
+// Converts a String to JSArray.
+// For example, "foo" => ["f", "o", "o"].
+// The result is truncated to |limit| characters.  Flat one-byte strings
+// take a fast path through the single-character string cache.
+RUNTIME_FUNCTION(Runtime_StringToArray) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
+
+ s = String::Flatten(s);
+ const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));
+
+ Handle<FixedArray> elements;
+ // Index of the first element the slow loop below still has to fill.
+ int position = 0;
+ if (s->IsFlat() && s->IsOneByteRepresentation()) {
+ // Try using cached chars where possible.
+ elements = isolate->factory()->NewUninitializedFixedArray(length);
+
+ DisallowHeapAllocation no_gc;
+ String::FlatContent content = s->GetFlatContent();
+ if (content.IsOneByte()) {
+ Vector<const uint8_t> chars = content.ToOneByteVector();
+ // Note, this will initialize all elements (not only the prefix)
+ // to prevent GC from seeing partially initialized array.
+ position = CopyCachedOneByteCharsToArray(isolate->heap(), chars.start(),
+ *elements, length);
+ } else {
+ MemsetPointer(elements->data_start(), isolate->heap()->undefined_value(),
+ length);
+ }
+ } else {
+ elements = isolate->factory()->NewFixedArray(length);
+ }
+ // Slow path: materialize the remaining characters one at a time.
+ for (int i = position; i < length; ++i) {
+ Handle<Object> str =
+ isolate->factory()->LookupSingleCharacterStringFromCode(s->Get(i));
+ elements->set(i, *str);
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < length; ++i) {
+ DCHECK(String::cast(elements->get(i))->length() == 1);
+ }
+#endif
+
+ return *isolate->factory()->NewJSArrayWithElements(elements);
+}
+
+
+// True for the two Latin-1 characters whose uppercase form leaves the
+// one-byte range: y-umlaut (U+00FF -> U+0178) and micro sign
+// (U+00B5 -> U+039C).
+static inline bool ToUpperOverflows(uc32 character) {
+ // y with umlauts and the micro sign are the only characters that stop
+ // fitting into one-byte when converting to uppercase.
+ static const uc32 yuml_code = 0xff;
+ static const uc32 micro_code = 0xb5;
+ return (character == yuml_code || character == micro_code);
+}
+
+
+// Case-converts |string| into the preallocated |result| of length
+// |result_length| using |mapping|.  Possible returns:
+// - |result| if at least one character changed;
+// - |string| if the conversion was the identity (so |result| can be
+// discarded as garbage);
+// - a Smi holding the exact required length when |result| turned out too
+// short (negative when a to-upper overflow forces a two-byte result);
+// - a thrown InvalidStringLength exception if the result would exceed
+// String::kMaxLength.
+// Runs GC-free except for the error path.
+template <class Converter>
+MUST_USE_RESULT static Object* ConvertCaseHelper(
+ Isolate* isolate, String* string, SeqString* result, int result_length,
+ unibrow::Mapping<Converter, 128>* mapping) {
+ DisallowHeapAllocation no_gc;
+ // We try this twice, once with the assumption that the result is no longer
+ // than the input and, if that assumption breaks, again with the exact
+ // length. This may not be pretty, but it is nicer than what was here before
+ // and I hereby claim my vaffel-is.
+ //
+ // NOTE: This assumes that the upper/lower case of an ASCII
+ // character is also ASCII. This is currently the case, but it
+ // might break in the future if we implement more context and locale
+ // dependent upper/lower conversions.
+ bool has_changed_character = false;
+
+ // Convert all characters to upper case, assuming that they will fit
+ // in the buffer
+ Access<ConsStringIteratorOp> op(isolate->runtime_state()->string_iterator());
+ StringCharacterStream stream(string, op.value());
+ unibrow::uchar chars[Converter::kMaxWidth];
+ // We can assume that the string is not empty
+ uc32 current = stream.GetNext();
+ // Overflow only matters when uppercasing into a one-byte buffer.
+ bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+ for (int i = 0; i < result_length;) {
+ bool has_next = stream.HasMore();
+ uc32 next = has_next ? stream.GetNext() : 0;
+ int char_length = mapping->get(current, next, chars);
+ if (char_length == 0) {
+ // The case conversion of this character is the character itself.
+ result->Set(i, current);
+ i++;
+ } else if (char_length == 1 &&
+ (ignore_overflow || !ToUpperOverflows(current))) {
+ // Common case: converting the letter resulted in one character.
+ DCHECK(static_cast<uc32>(chars[0]) != current);
+ result->Set(i, chars[0]);
+ has_changed_character = true;
+ i++;
+ } else if (result_length == string->length()) {
+ bool overflows = ToUpperOverflows(current);
+ // We've assumed that the result would be as long as the
+ // input but here is a character that converts to several
+ // characters. No matter, we calculate the exact length
+ // of the result and try the whole thing again.
+ //
+ // Note that this leaves room for optimization. We could just
+ // memcpy what we already have to the result string. Also,
+ // the result string is the last object allocated we could
+ // "realloc" it and probably, in the vast majority of cases,
+ // extend the existing string to be able to hold the full
+ // result.
+ int next_length = 0;
+ if (has_next) {
+ next_length = mapping->get(next, 0, chars);
+ if (next_length == 0) next_length = 1;
+ }
+ int current_length = i + char_length + next_length;
+ while (stream.HasMore()) {
+ current = stream.GetNext();
+ overflows |= ToUpperOverflows(current);
+ // NOTE: we use 0 as the next character here because, while
+ // the next character may affect what a character converts to,
+ // it does not in any case affect the length of what it converts
+ // to.
+ int char_length = mapping->get(current, 0, chars);
+ if (char_length == 0) char_length = 1;
+ current_length += char_length;
+ if (current_length > String::kMaxLength) {
+ AllowHeapAllocation allocate_error_and_return;
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewInvalidStringLengthError());
+ }
+ }
+ // Try again with the real length. Return signed if we need
+ // to allocate a two-byte string for to-uppercase overflow.
+ return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
+ : Smi::FromInt(current_length);
+ } else {
+ for (int j = 0; j < char_length; j++) {
+ result->Set(i, chars[j]);
+ i++;
+ }
+ has_changed_character = true;
+ }
+ current = next;
+ }
+ if (has_changed_character) {
+ return result;
+ } else {
+ // If we didn't actually change anything in doing the conversion
+ // we simple return the result and let the converted string
+ // become garbage; there is no reason to keep two identical strings
+ // alive.
+ return string;
+ }
+}
+
+
+// 0x0101...01: replicates a byte value across every byte of a word.
+static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
+// 0x8080...80: the high bit of every byte; set iff that byte is non-ASCII.
+static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
+
+// Given a word and two range boundaries returns a word with high bit
+// set in every byte iff the corresponding input byte was strictly in
+// the range (m, n). All the other bits in the result are cleared.
+// This function is only useful when it can be inlined and the
+// boundaries are statically known.
+// Requires: all bytes in the input word and the boundaries must be
+// ASCII (less than 0x7F).
+static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
+ // Use strict inequalities since in edge cases the function could be
+ // further simplified.
+ DCHECK(0 < m && m < n);
+ // Has high bit set in every w byte less than n.
+ uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
+ // Has high bit set in every w byte greater than m.
+ uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
+ return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
+}
+
+
+#ifdef DEBUG
+// Debug-only cross-check for FastAsciiConvert: verifies every changed byte
+// is exactly an ASCII letter shifted by 'a'-'A' in the expected direction,
+// and that the reported |changed| flag matches reality.
+static bool CheckFastAsciiConvert(char* dst, const char* src, int length,
+ bool changed, bool is_to_lower) {
+ bool expected_changed = false;
+ for (int i = 0; i < length; i++) {
+ if (dst[i] == src[i]) continue;
+ expected_changed = true;
+ if (is_to_lower) {
+ DCHECK('A' <= src[i] && src[i] <= 'Z');
+ DCHECK(dst[i] == src[i] + ('a' - 'A'));
+ } else {
+ DCHECK('a' <= src[i] && src[i] <= 'z');
+ DCHECK(dst[i] == src[i] - ('a' - 'A'));
+ }
+ }
+ return (expected_changed == changed);
+}
+#endif
+
+
+// Word-at-a-time ASCII case conversion from |src| into |dst| (|length|
+// bytes).  Returns true and sets |*changed_out| when the whole input was
+// ASCII; returns false (result must be discarded) the moment any non-ASCII
+// byte is observed via the accumulated or_acc high bits.
+template <class Converter>
+static bool FastAsciiConvert(char* dst, const char* src, int length,
+ bool* changed_out) {
+#ifdef DEBUG
+ char* saved_dst = dst;
+ const char* saved_src = src;
+#endif
+ DisallowHeapAllocation no_gc;
+ // We rely on the distance between upper and lower case letters
+ // being a known power of 2.
+ DCHECK('a' - 'A' == (1 << 5));
+ // Boundaries for the range of input characters than require conversion.
+ static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1;
+ static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
+ bool changed = false;
+ // OR of every byte seen; a set high bit anywhere means non-ASCII input.
+ uintptr_t or_acc = 0;
+ const char* const limit = src + length;
+
+ // dst is newly allocated and always aligned.
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
+ // Only attempt processing one word at a time if src is also aligned.
+ if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
+ // Process the prefix of the input that requires no conversion one aligned
+ // (machine) word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ or_acc |= w;
+ if (AsciiRangeMask(w, lo, hi) != 0) {
+ changed = true;
+ break;
+ }
+ *reinterpret_cast<uintptr_t*>(dst) = w;
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
+ // Process the remainder of the input performing conversion when
+ // required one word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ or_acc |= w;
+ uintptr_t m = AsciiRangeMask(w, lo, hi);
+ // The mask has high (7th) bit set in every byte that needs
+ // conversion and we know that the distance between cases is
+ // 1 << 5.
+ *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
+ }
+ // Process the last few bytes of the input (or the whole input if
+ // unaligned access is not supported).
+ while (src < limit) {
+ char c = *src;
+ or_acc |= c;
+ if (lo < c && c < hi) {
+ c ^= (1 << 5); // flip the case bit
+ changed = true;
+ }
+ *dst = c;
+ ++src;
+ ++dst;
+ }
+
+ // Any non-ASCII byte invalidates the fast conversion.
+ if ((or_acc & kAsciiMask) != 0) return false;
+
+ DCHECK(CheckFastAsciiConvert(saved_dst, saved_src, length, changed,
+ Converter::kIsToLower));
+
+ *changed_out = changed;
+ return true;
+}
+
+
+// Top-level case conversion driver shared by Runtime_StringToLowerCase and
+// Runtime_StringToUpperCase.  Tries the FastAsciiConvert word-wise path for
+// flat one-byte strings, then falls back to ConvertCaseHelper, retrying
+// with the exact length (and a two-byte buffer when to-upper overflows
+// one-byte) if the equal-length assumption fails.
+template <class Converter>
+MUST_USE_RESULT static Object* ConvertCase(
+ Handle<String> s, Isolate* isolate,
+ unibrow::Mapping<Converter, 128>* mapping) {
+ s = String::Flatten(s);
+ int length = s->length();
+ // Assume that the string is not empty; we need this assumption later
+ if (length == 0) return *s;
+
+ // Simpler handling of ASCII strings.
+ //
+ // NOTE: This assumes that the upper/lower case of an ASCII
+ // character is also ASCII. This is currently the case, but it
+ // might break in the future if we implement more context and locale
+ // dependent upper/lower conversions.
+ if (s->IsOneByteRepresentationUnderneath()) {
+ // Same length as input.
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat_content = s->GetFlatContent();
+ DCHECK(flat_content.IsFlat());
+ bool has_changed_character = false;
+ bool is_ascii = FastAsciiConvert<Converter>(
+ reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
+ length, &has_changed_character);
+ // If not ASCII, we discard the result and take the 2 byte path.
+ if (is_ascii) return has_changed_character ? *result : *s;
+ }
+
+ Handle<SeqString> result; // Same length as input.
+ if (s->IsOneByteRepresentation()) {
+ result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ } else {
+ result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
+ }
+
+ Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+ if (answer->IsException() || answer->IsString()) return answer;
+
+ // A Smi answer is the exact required length; negative means the result
+ // needs a two-byte representation (to-upper overflow).
+ DCHECK(answer->IsSmi());
+ length = Smi::cast(answer)->value();
+ if (s->IsOneByteRepresentation() && length > 0) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+ } else {
+ if (length < 0) length = -length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(length));
+ }
+ return ConvertCaseHelper(isolate, *s, *result, length, mapping);
+}
+
+
+// %StringToLowerCase(s): lowercases via ConvertCase with the shared
+// to-lower mapping table.
+RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ return ConvertCase(s, isolate, isolate->runtime_state()->to_lower_mapping());
+}
+
+
+// %StringToUpperCase(s): uppercases via ConvertCase with the shared
+// to-upper mapping table.
+RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ return ConvertCase(s, isolate, isolate->runtime_state()->to_upper_mapping());
+}
+
+
+// %StringTrim(string, trimLeft, trimRight): strips whitespace and line
+// terminators (per the isolate's UnicodeCache) from either or both ends
+// and returns the trimmed substring.
+RUNTIME_FUNCTION(Runtime_StringTrim) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
+
+ string = String::Flatten(string);
+ int length = string->length();
+
+ // Advance |left| past leading whitespace.
+ int left = 0;
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
+ if (trimLeft) {
+ while (left < length &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
+ left++;
+ }
+ }
+
+ // Retreat |right| past trailing whitespace, never crossing |left|.
+ int right = length;
+ if (trimRight) {
+ while (
+ right > left &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
+ right--;
+ }
+ }
+
+ return *isolate->factory()->NewSubString(string, left, right);
+}
+
+
+// %TruncateString(string, new_length): shortens a sequential string in
+// place to |new_length| characters via SeqString::Truncate.
+RUNTIME_FUNCTION(Runtime_TruncateString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
+ CONVERT_INT32_ARG_CHECKED(new_length, 1);
+ RUNTIME_ASSERT(new_length >= 0);
+ return *SeqString::Truncate(string, new_length);
+}
+
+
+// %NewString(length, is_one_byte): allocates an uninitialized sequential
+// string of the requested length and representation; length 0 returns the
+// canonical empty string.  Throws if the allocation fails (presumably for
+// over-long lengths — TODO confirm against NewRaw*String).
+RUNTIME_FUNCTION(Runtime_NewString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_INT32_ARG_CHECKED(length, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
+ if (length == 0) return isolate->heap()->empty_string();
+ Handle<String> result;
+ if (is_one_byte) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(length));
+ }
+ return *result;
+}
+
+
+// %StringEquals(x, y): content equality.  Returns Smi EQUAL (0) when the
+// strings match and NOT_EQUAL (1) otherwise — note the inverted sense
+// relative to String::Equals, explained inline.
+RUNTIME_FUNCTION(Runtime_StringEquals) {
+ HandleScope handle_scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+
+ bool not_equal = !String::Equals(x, y);
+ // This is slightly convoluted because the value that signifies
+ // equality is 0 and inequality is 1 so we have to negate the result
+ // from String::Equals.
+ DCHECK(not_equal == 0 || not_equal == 1);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(NOT_EQUAL == 1);
+ return Smi::FromInt(not_equal);
+}
+
+
+// %FlattenString(str): forces a (possibly cons/sliced) string into flat
+// representation and returns it.
+RUNTIME_FUNCTION(Runtime_FlattenString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
+ return *String::Flatten(str);
+}
+
+
+// Intrinsic %_StringCharFromCode: thin forwarder to Runtime_CharFromCode.
+RUNTIME_FUNCTION(RuntimeReference_StringCharFromCode) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_CharFromCode(args, isolate);
+}
+
+
+// Intrinsic %_StringCharAt: charCodeAt followed by charFromCode.  Bad
+// argument types yield Smi 0; an infinite or out-of-range index yields the
+// empty string (the NaN from StringCharCodeAtRT marks out-of-range).
+RUNTIME_FUNCTION(RuntimeReference_StringCharAt) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ if (!args[0]->IsString()) return Smi::FromInt(0);
+ if (!args[1]->IsNumber()) return Smi::FromInt(0);
+ if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
+ Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
+ if (code->IsNaN()) return isolate->heap()->empty_string();
+ return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate);
+}
+
+
+// Intrinsic %_OneByteSeqStringSetChar(index, value, string): writes one
+// character into a sequential one-byte string in place and returns it.
+RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_INT32_ARG_CHECKED(index, 0);
+ CONVERT_INT32_ARG_CHECKED(value, 1);
+ CONVERT_ARG_CHECKED(SeqOneByteString, string, 2);
+ string->SeqOneByteStringSet(index, value);
+ return string;
+}
+
+
+// Intrinsic %_TwoByteSeqStringSetChar(index, value, string): writes one
+// character into a sequential two-byte string in place and returns it.
+RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_INT32_ARG_CHECKED(index, 0);
+ CONVERT_INT32_ARG_CHECKED(value, 1);
+ CONVERT_ARG_CHECKED(SeqTwoByteString, string, 2);
+ string->SeqTwoByteStringSet(index, value);
+ return string;
+}
+
+
+// Intrinsic %_StringCompare: thin forwarder to Runtime_StringCompare.
+RUNTIME_FUNCTION(RuntimeReference_StringCompare) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_StringCompare(args, isolate);
+}
+
+
+// Intrinsic %_StringCharCodeAt: undefined for bad argument types, NaN for
+// an infinite index, otherwise forwards to Runtime_StringCharCodeAtRT.
+RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ if (!args[0]->IsString()) return isolate->heap()->undefined_value();
+ if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
+ if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
+ return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
+}
+
+
+// Intrinsic %_SubString: thin forwarder to Runtime_SubString.
+RUNTIME_FUNCTION(RuntimeReference_SubString) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_SubString(args, isolate);
+}
+
+
+// Intrinsic %_StringAdd: thin forwarder to Runtime_StringAdd.
+RUNTIME_FUNCTION(RuntimeReference_StringAdd) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_StringAdd(args, isolate);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
new file mode 100644
index 0000000000..eac3c613af
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -0,0 +1,323 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (!function->IsOptimized()) return isolate->heap()->undefined_value();
+
+ // TODO(turbofan): Deoptimization is not supported yet.
+ if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ return isolate->heap()->undefined_value();
+ }
+
+ Deoptimizer::DeoptimizeFunction(*function);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+#if defined(USE_SIMULATOR)
+ return isolate->heap()->true_value();
+#else
+ return isolate->heap()->false_value();
+#endif
+}
+
+
+RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ return isolate->heap()->ToBoolean(
+ isolate->concurrent_recompilation_enabled());
+}
+
+
+RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ // The following two assertions are lifted from the DCHECKs inside
+ // JSFunction::MarkForOptimization().
+ RUNTIME_ASSERT(!function->shared()->is_generator());
+ RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
+ (function->code()->kind() == Code::FUNCTION &&
+ function->code()->optimizable()));
+
+ // If the function is optimized, just return.
+ if (function->IsOptimized()) return isolate->heap()->undefined_value();
+
+ function->MarkForOptimization();
+
+ Code* unoptimized = function->shared()->code();
+ if (args.length() == 2 && unoptimized->kind() == Code::FUNCTION) {
+ CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
+ if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("osr")) && FLAG_use_osr) {
+ // Start patching from the currently patched loop nesting level.
+ DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
+ isolate->runtime_profiler()->AttemptOnStackReplacement(
+ *function, Code::kMaxLoopNestingMarker);
+ } else if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
+ isolate->concurrent_recompilation_enabled()) {
+ function->MarkForConcurrentOptimization();
+ }
+ }
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ function->shared()->set_optimization_disabled(true);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+ if (!isolate->use_crankshaft()) {
+ return Smi::FromInt(4); // 4 == "never".
+ }
+ bool sync_with_compiler_thread = true;
+ if (args.length() == 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, sync, 1);
+ if (sync->IsOneByteEqualTo(STATIC_CHAR_VECTOR("no sync"))) {
+ sync_with_compiler_thread = false;
+ }
+ }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (isolate->concurrent_recompilation_enabled() &&
+ sync_with_compiler_thread) {
+ while (function->IsInOptimizationQueue()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ base::OS::Sleep(50);
+ }
+ }
+ if (FLAG_always_opt) {
+ // We may have always opt, but that is more best-effort than a real
+ // promise, so we still say "no" if it is not optimized.
+ return function->IsOptimized() ? Smi::FromInt(3) // 3 == "always".
+ : Smi::FromInt(2); // 2 == "no".
+ }
+ if (FLAG_deopt_every_n_times) {
+ return Smi::FromInt(6); // 6 == "maybe deopted".
+ }
+ if (function->IsOptimized() && function->code()->is_turbofanned()) {
+ return Smi::FromInt(7); // 7 == "TurboFan compiler".
+ }
+ return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
+ : Smi::FromInt(2); // 2 == "no".
+}
+
+
+RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
+ DCHECK(args.length() == 0);
+ RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
+ isolate->optimizing_compiler_thread()->Unblock();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return Smi::FromInt(function->shared()->opt_count());
+}
+
+
+RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ function->shared()->ClearTypeFeedbackInfo();
+ Code* unoptimized = function->shared()->code();
+ if (unoptimized->kind() == Code::FUNCTION) {
+ unoptimized->ClearInlineCaches();
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ isolate->heap()->NotifyContextDisposed();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2 || args.length() == 3);
+#ifdef DEBUG
+ CONVERT_SMI_ARG_CHECKED(interval, 0);
+ CONVERT_SMI_ARG_CHECKED(timeout, 1);
+ isolate->heap()->set_allocation_timeout(timeout);
+ FLAG_gc_interval = interval;
+ if (args.length() == 3) {
+ // Enable/disable inline allocation if requested.
+ CONVERT_BOOLEAN_ARG_CHECKED(inline_allocation, 2);
+ if (inline_allocation) {
+ isolate->heap()->EnableInlineAllocation();
+ } else {
+ isolate->heap()->DisableInlineAllocation();
+ }
+ }
+#endif
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_DebugPrint) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+
+ OFStream os(stdout);
+#ifdef DEBUG
+ if (args[0]->IsString()) {
+ // If we have a string, assume it's a code "marker"
+ // and print some interesting cpu debugging info.
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ os << "fp = " << frame->fp() << ", sp = " << frame->sp()
+ << ", caller_sp = " << frame->caller_sp() << ": ";
+ } else {
+ os << "DebugPrint: ";
+ }
+ args[0]->Print(os);
+ if (args[0]->IsHeapObject()) {
+ os << "\n";
+ HeapObject::cast(args[0])->map()->Print(os);
+ }
+#else
+ // ShortPrint is available in release mode. Print is not.
+ os << Brief(args[0]);
+#endif
+ os << endl;
+
+ return args[0]; // return TOS
+}
+
+
+RUNTIME_FUNCTION(Runtime_DebugTrace) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ isolate->PrintStack(stdout);
+ return isolate->heap()->undefined_value();
+}
+
+
+// This will not allocate (flatten the string), but it may run
+// very slowly for very deeply nested ConsStrings. For debugging use only.
+RUNTIME_FUNCTION(Runtime_GlobalPrint) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_ARG_CHECKED(String, string, 0);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ while (stream.HasMore()) {
+ uint16_t character = stream.GetNext();
+ PrintF("%c", character);
+ }
+ return string;
+}
+
+
+RUNTIME_FUNCTION(Runtime_SystemBreak) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ base::OS::DebugBreak();
+ return isolate->heap()->undefined_value();
+}
+
+
+// Sets a v8 flag.
+RUNTIME_FUNCTION(Runtime_SetFlags) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(String, arg, 0);
+ SmartArrayPointer<char> flags =
+ arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_Abort) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message =
+ GetBailoutReason(static_cast<BailoutReason>(message_id));
+ base::OS::PrintError("abort: %s\n", message);
+ isolate->PrintStack(stderr);
+ base::OS::Abort();
+ UNREACHABLE();
+ return NULL;
+}
+
+
+RUNTIME_FUNCTION(Runtime_AbortJS) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ base::OS::PrintError("abort: %s\n", message->ToCString().get());
+ isolate->PrintStack(stderr);
+ base::OS::Abort();
+ UNREACHABLE();
+ return NULL;
+}
+
+
+RUNTIME_FUNCTION(Runtime_HaveSameMap) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSObject, obj1, 0);
+ CONVERT_ARG_CHECKED(JSObject, obj2, 1);
+ return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
+}
+
+
+#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
+ RUNTIME_FUNCTION(Runtime_Has##Name) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->Has##Name()); \
+ }
+
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
+// Properties test sitting with elements tests - not fooling anyone.
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
+
+#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
new file mode 100644
index 0000000000..c138a4febd
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -0,0 +1,760 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+
+
+namespace v8 {
+namespace internal {
+
+void Runtime::FreeArrayBuffer(Isolate* isolate,
+ JSArrayBuffer* phantom_array_buffer) {
+ if (phantom_array_buffer->should_be_freed()) {
+ DCHECK(phantom_array_buffer->is_external());
+ free(phantom_array_buffer->backing_store());
+ }
+ if (phantom_array_buffer->is_external()) return;
+
+ size_t allocated_length =
+ NumberToSize(isolate, phantom_array_buffer->byte_length());
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(
+ -static_cast<int64_t>(allocated_length));
+ CHECK(V8::ArrayBufferAllocator() != NULL);
+ V8::ArrayBufferAllocator()->Free(phantom_array_buffer->backing_store(),
+ allocated_length);
+}
+
+
+void Runtime::SetupArrayBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ bool is_external, void* data,
+ size_t allocated_length) {
+ DCHECK(array_buffer->GetInternalFieldCount() ==
+ v8::ArrayBuffer::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
+ array_buffer->SetInternalField(i, Smi::FromInt(0));
+ }
+ array_buffer->set_backing_store(data);
+ array_buffer->set_flag(Smi::FromInt(0));
+ array_buffer->set_is_external(is_external);
+
+ Handle<Object> byte_length =
+ isolate->factory()->NewNumberFromSize(allocated_length);
+ CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
+ array_buffer->set_byte_length(*byte_length);
+
+ array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
+ isolate->heap()->set_array_buffers_list(*array_buffer);
+ array_buffer->set_weak_first_view(isolate->heap()->undefined_value());
+}
+
+
+bool Runtime::SetupArrayBufferAllocatingData(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t allocated_length,
+ bool initialize) {
+ void* data;
+ CHECK(V8::ArrayBufferAllocator() != NULL);
+ if (allocated_length != 0) {
+ if (initialize) {
+ data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
+ } else {
+ data =
+ V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length);
+ }
+ if (data == NULL) return false;
+ } else {
+ data = NULL;
+ }
+
+ SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(allocated_length);
+
+ return true;
+}
+
+
+void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
+ Isolate* isolate = array_buffer->GetIsolate();
+ for (Handle<Object> view_obj(array_buffer->weak_first_view(), isolate);
+ !view_obj->IsUndefined();) {
+ Handle<JSArrayBufferView> view(JSArrayBufferView::cast(*view_obj));
+ if (view->IsJSTypedArray()) {
+ JSTypedArray::cast(*view)->Neuter();
+ } else if (view->IsJSDataView()) {
+ JSDataView::cast(*view)->Neuter();
+ } else {
+ UNREACHABLE();
+ }
+ view_obj = handle(view->weak_next(), isolate);
+ }
+ array_buffer->Neuter();
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
+ if (!holder->byte_length()->IsUndefined()) {
+ // ArrayBuffer is already initialized; probably a fuzz test.
+ return *holder;
+ }
+ size_t allocated_length = 0;
+ if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+ if (!Runtime::SetupArrayBufferAllocatingData(isolate, holder,
+ allocated_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
+ return holder->byte_length();
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
+ RUNTIME_ASSERT(!source.is_identical_to(target));
+ size_t start = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
+ size_t target_length = NumberToSize(isolate, target->byte_length());
+
+ if (target_length == 0) return isolate->heap()->undefined_value();
+
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ RUNTIME_ASSERT(start <= source_byte_length);
+ RUNTIME_ASSERT(source_byte_length - start >= target_length);
+ uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
+ uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
+ CopyBytes(target_data, source_data + start, target_length);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return isolate->heap()->ToBoolean(object->IsJSArrayBufferView());
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+ if (array_buffer->backing_store() == NULL) {
+ CHECK(Smi::FromInt(0) == array_buffer->byte_length());
+ return isolate->heap()->undefined_value();
+ }
+ DCHECK(!array_buffer->is_external());
+ void* backing_store = array_buffer->backing_store();
+ size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
+ array_buffer->set_is_external(true);
+ Runtime::NeuterArrayBuffer(array_buffer);
+ V8::ArrayBufferAllocator()->Free(backing_store, byte_length);
+ return isolate->heap()->undefined_value();
+}
+
+
+void Runtime::ArrayIdToTypeAndSize(int arrayId, ExternalArrayType* array_type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t* element_size) {
+ switch (arrayId) {
+#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
+ case ARRAY_ID_##TYPE: \
+ *array_type = kExternal##Type##Array; \
+ *external_elements_kind = EXTERNAL_##TYPE##_ELEMENTS; \
+ *fixed_elements_kind = TYPE##_ELEMENTS; \
+ *element_size = size; \
+ break;
+
+ TYPED_ARRAYS(ARRAY_ID_CASE)
+#undef ARRAY_ID_CASE
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ CONVERT_SMI_ARG_CHECKED(arrayId, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset_object, 3);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4);
+
+ RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
+ arrayId <= Runtime::ARRAY_ID_LAST);
+
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ElementsKind external_elements_kind =
+ EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &external_elements_kind,
+ &fixed_elements_kind, &element_size);
+ RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
+
+ size_t byte_offset = 0;
+ size_t byte_length = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset_object, &byte_offset));
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length_object, &byte_length));
+
+ if (maybe_buffer->IsJSArrayBuffer()) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
+ size_t array_buffer_byte_length =
+ NumberToSize(isolate, buffer->byte_length());
+ RUNTIME_ASSERT(byte_offset <= array_buffer_byte_length);
+ RUNTIME_ASSERT(array_buffer_byte_length - byte_offset >= byte_length);
+ } else {
+ RUNTIME_ASSERT(maybe_buffer->IsNull());
+ }
+
+ RUNTIME_ASSERT(byte_length % element_size == 0);
+ size_t length = byte_length / element_size;
+
+ if (length > static_cast<unsigned>(Smi::kMaxValue)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError("invalid_typed_array_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+
+ // All checks are done, now we can modify objects.
+
+ DCHECK(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
+ holder->set_length(*length_obj);
+ holder->set_byte_offset(*byte_offset_object);
+ holder->set_byte_length(*byte_length_object);
+
+ if (!maybe_buffer->IsNull()) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
+ holder->set_buffer(*buffer);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements = isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ Handle<Map> map =
+ JSObject::GetElementsTransitionMap(holder, external_elements_kind);
+ JSObject::SetMapAndElements(holder, map, elements);
+ DCHECK(IsExternalArrayElementsKind(holder->map()->elements_kind()));
+ } else {
+ holder->set_buffer(Smi::FromInt(0));
+ holder->set_weak_next(isolate->heap()->undefined_value());
+ Handle<FixedTypedArrayBase> elements =
+ isolate->factory()->NewFixedTypedArray(static_cast<int>(length),
+ array_type);
+ holder->set_elements(*elements);
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
+// Initializes a typed array from an array-like object.
+// If an array-like object happens to be a typed array of the same type,
+// initializes backing store using memmove.
+//
+// Returns true if backing store was initialized or false otherwise.
+RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ CONVERT_SMI_ARG_CHECKED(arrayId, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 3);
+
+ RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
+ arrayId <= Runtime::ARRAY_ID_LAST);
+
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ElementsKind external_elements_kind =
+ EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &external_elements_kind,
+ &fixed_elements_kind, &element_size);
+
+ RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ if (source->IsJSTypedArray() &&
+ JSTypedArray::cast(*source)->type() == array_type) {
+ length_obj = Handle<Object>(JSTypedArray::cast(*source)->length(), isolate);
+ }
+ size_t length = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
+
+ if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
+ (length > (kMaxInt / element_size))) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError("invalid_typed_array_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+ size_t byte_length = length * element_size;
+
+ DCHECK(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
+ // NOTE: not initializing backing store.
+ // We assume that the caller of this function will initialize holder
+ // with the loop
+ // for(i = 0; i < length; i++) { holder[i] = source[i]; }
+ // We assume that the caller of this function is always a typed array
+ // constructor.
+ // If source is a typed array, this loop will always run to completion,
+ // so we are sure that the backing store will be initialized.
+ // Otherwise, the indexing operation might throw, so the loop will not
+ // run to completion and the typed array might remain partly initialized.
+ // However we further assume that the caller of this function is a typed array
+ // constructor, and the exception will propagate out of the constructor,
+ // therefore uninitialized memory will not be accessible by a user program.
+ //
+ // TODO(dslomov): revise this once we support subclassing.
+
+ if (!Runtime::SetupArrayBufferAllocatingData(isolate, buffer, byte_length,
+ false)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError("invalid_array_buffer_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+
+ holder->set_buffer(*buffer);
+ holder->set_byte_offset(Smi::FromInt(0));
+ Handle<Object> byte_length_obj(
+ isolate->factory()->NewNumberFromSize(byte_length));
+ holder->set_byte_length(*byte_length_obj);
+ holder->set_length(*length_obj);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements = isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()));
+ Handle<Map> map =
+ JSObject::GetElementsTransitionMap(holder, external_elements_kind);
+ JSObject::SetMapAndElements(holder, map, elements);
+
+ if (source->IsJSTypedArray()) {
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
+
+ if (typed_array->type() == holder->type()) {
+ uint8_t* backing_store =
+ static_cast<uint8_t*>(typed_array->GetBuffer()->backing_store());
+ size_t source_byte_offset =
+ NumberToSize(isolate, typed_array->byte_offset());
+ memcpy(buffer->backing_store(), backing_store + source_byte_offset,
+ byte_length);
+ return isolate->heap()->true_value();
+ }
+ }
+
+ return isolate->heap()->false_value();
+}
+
+
+#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
+ RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \
+ return holder->accessor(); \
+ }
+
+BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
+BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
+BUFFER_VIEW_GETTER(TypedArray, Length, length)
+BUFFER_VIEW_GETTER(DataView, Buffer, buffer)
+
+#undef BUFFER_VIEW_GETTER
+
+RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ return *holder->GetBuffer();
+}
+
+
+// Return codes for Runtime_TypedArraySetFastCases.
+// Should be synchronized with typedarray.js natives.
+enum TypedArraySetResultCodes {
+ // Set from typed array of the same type.
+ // This is processed by TypedArraySetFastCases
+ TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE = 0,
+ // Set from typed array of the different type, overlapping in memory.
+ TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING = 1,
+ // Set from typed array of the different type, non-overlapping.
+ TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING = 2,
+ // Set from non-typed array.
+ TYPED_ARRAY_SET_NON_TYPED_ARRAY = 3
+};
+
+
+RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ if (!args[0]->IsJSTypedArray()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("not_typed_array", HandleVector<Object>(NULL, 0)));
+ }
+
+ if (!args[1]->IsJSTypedArray())
+ return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, source_obj, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset_obj, 2);
+
+ Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
+ Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
+ size_t offset = 0;
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset));
+ size_t target_length = NumberToSize(isolate, target->length());
+ size_t source_length = NumberToSize(isolate, source->length());
+ size_t target_byte_length = NumberToSize(isolate, target->byte_length());
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ if (offset > target_length || offset + source_length > target_length ||
+ offset + source_length < offset) { // overflow
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError("typed_array_set_source_too_large",
+ HandleVector<Object>(NULL, 0)));
+ }
+
+ size_t target_offset = NumberToSize(isolate, target->byte_offset());
+ size_t source_offset = NumberToSize(isolate, source->byte_offset());
+ uint8_t* target_base =
+ static_cast<uint8_t*>(target->GetBuffer()->backing_store()) +
+ target_offset;
+ uint8_t* source_base =
+ static_cast<uint8_t*>(source->GetBuffer()->backing_store()) +
+ source_offset;
+
+ // Typed arrays of the same type: use memmove.
+ if (target->type() == source->type()) {
+ memmove(target_base + offset * target->element_size(), source_base,
+ source_byte_length);
+ return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE);
+ }
+
+ // Typed arrays of different types over the same backing store
+ if ((source_base <= target_base &&
+ source_base + source_byte_length > target_base) ||
+ (target_base <= source_base &&
+ target_base + target_byte_length > source_base)) {
+ // We do not support overlapping ArrayBuffers
+ DCHECK(target->GetBuffer()->backing_store() ==
+ source->GetBuffer()->backing_store());
+ return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
+ } else { // Non-overlapping typed arrays
+ return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
+ DCHECK(args.length() == 0);
+ DCHECK_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap +
+ FixedTypedArrayBase::kDataOffset);
+ return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
+}
+
+
+RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
+
+ DCHECK(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+ size_t buffer_length = 0;
+ size_t offset = 0;
+ size_t length = 0;
+ RUNTIME_ASSERT(
+ TryNumberToSize(isolate, buffer->byte_length(), &buffer_length));
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset, &offset));
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length, &length));
+
+ // TODO(jkummerow): When we have a "safe numerics" helper class, use it here.
+ // Entire range [offset, offset + length] must be in bounds.
+ RUNTIME_ASSERT(offset <= buffer_length);
+ RUNTIME_ASSERT(offset + length <= buffer_length);
+ // No overflow.
+ RUNTIME_ASSERT(offset + length >= offset);
+
+ holder->set_buffer(*buffer);
+ holder->set_byte_offset(*byte_offset);
+ holder->set_byte_length(*byte_length);
+
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+inline static bool NeedToFlipBytes(bool is_little_endian) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ return !is_little_endian;
+#else
+ return is_little_endian;
+#endif
+}
+
+
+template <int n>
+inline void CopyBytes(uint8_t* target, uint8_t* source) {
+ for (int i = 0; i < n; i++) {
+ *(target++) = *(source++);
+ }
+}
+
+
+template <int n>
+inline void FlipBytes(uint8_t* target, uint8_t* source) {
+ source = source + (n - 1);
+ for (int i = 0; i < n; i++) {
+ *(target++) = *(source--);
+ }
+}
+
+
+template <typename T>
+inline static bool DataViewGetValue(Isolate* isolate,
+ Handle<JSDataView> data_view,
+ Handle<Object> byte_offset_obj,
+ bool is_little_endian, T* result) {
+ size_t byte_offset = 0;
+ if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ return false;
+ }
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+ size_t data_view_byte_offset =
+ NumberToSize(isolate, data_view->byte_offset());
+ size_t data_view_byte_length =
+ NumberToSize(isolate, data_view->byte_length());
+ if (byte_offset + sizeof(T) > data_view_byte_length ||
+ byte_offset + sizeof(T) < byte_offset) { // overflow
+ return false;
+ }
+
+ union Value {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ };
+
+ Value value;
+ size_t buffer_offset = data_view_byte_offset + byte_offset;
+ DCHECK(NumberToSize(isolate, buffer->byte_length()) >=
+ buffer_offset + sizeof(T));
+ uint8_t* source =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(value.bytes, source);
+ } else {
+ CopyBytes<sizeof(T)>(value.bytes, source);
+ }
+ *result = value.data;
+ return true;
+}
+
+
+template <typename T>
+static bool DataViewSetValue(Isolate* isolate, Handle<JSDataView> data_view,
+ Handle<Object> byte_offset_obj,
+ bool is_little_endian, T data) {
+ size_t byte_offset = 0;
+ if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ return false;
+ }
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+ size_t data_view_byte_offset =
+ NumberToSize(isolate, data_view->byte_offset());
+ size_t data_view_byte_length =
+ NumberToSize(isolate, data_view->byte_length());
+ if (byte_offset + sizeof(T) > data_view_byte_length ||
+ byte_offset + sizeof(T) < byte_offset) { // overflow
+ return false;
+ }
+
+ union Value {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ };
+
+ Value value;
+ value.data = data;
+ size_t buffer_offset = data_view_byte_offset + byte_offset;
+ DCHECK(NumberToSize(isolate, buffer->byte_length()) >=
+ buffer_offset + sizeof(T));
+ uint8_t* target =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(target, value.bytes);
+ } else {
+ CopyBytes<sizeof(T)>(target, value.bytes);
+ }
+ return true;
+}
+
+
+#define DATA_VIEW_GETTER(TypeName, Type, Converter) \
+ RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 3); \
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
+ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \
+ Type result; \
+ if (DataViewGetValue(isolate, holder, offset, is_little_endian, \
+ &result)) { \
+ return *isolate->factory()->Converter(result); \
+ } else { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewRangeError("invalid_data_view_accessor_offset", \
+ HandleVector<Object>(NULL, 0))); \
+ } \
+ }
+
+DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint)
+DATA_VIEW_GETTER(Int8, int8_t, NewNumberFromInt)
+DATA_VIEW_GETTER(Uint16, uint16_t, NewNumberFromUint)
+DATA_VIEW_GETTER(Int16, int16_t, NewNumberFromInt)
+DATA_VIEW_GETTER(Uint32, uint32_t, NewNumberFromUint)
+DATA_VIEW_GETTER(Int32, int32_t, NewNumberFromInt)
+DATA_VIEW_GETTER(Float32, float, NewNumber)
+DATA_VIEW_GETTER(Float64, double, NewNumber)
+
+#undef DATA_VIEW_GETTER
+
+
+template <typename T>
+static T DataViewConvertValue(double value);
+
+
+template <>
+int8_t DataViewConvertValue<int8_t>(double value) {
+ return static_cast<int8_t>(DoubleToInt32(value));
+}
+
+
+template <>
+int16_t DataViewConvertValue<int16_t>(double value) {
+ return static_cast<int16_t>(DoubleToInt32(value));
+}
+
+
+template <>
+int32_t DataViewConvertValue<int32_t>(double value) {
+ return DoubleToInt32(value);
+}
+
+
+template <>
+uint8_t DataViewConvertValue<uint8_t>(double value) {
+ return static_cast<uint8_t>(DoubleToUint32(value));
+}
+
+
+template <>
+uint16_t DataViewConvertValue<uint16_t>(double value) {
+ return static_cast<uint16_t>(DoubleToUint32(value));
+}
+
+
+template <>
+uint32_t DataViewConvertValue<uint32_t>(double value) {
+ return DoubleToUint32(value);
+}
+
+
+template <>
+float DataViewConvertValue<float>(double value) {
+ return static_cast<float>(value);
+}
+
+
+template <>
+double DataViewConvertValue<double>(double value) {
+ return value;
+}
+
+
+#define DATA_VIEW_SETTER(TypeName, Type) \
+ RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 4); \
+ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); \
+ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
+ Type v = DataViewConvertValue<Type>(value->Number()); \
+ if (DataViewSetValue(isolate, holder, offset, is_little_endian, v)) { \
+ return isolate->heap()->undefined_value(); \
+ } else { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewRangeError("invalid_data_view_accessor_offset", \
+ HandleVector<Object>(NULL, 0))); \
+ } \
+ }
+
+DATA_VIEW_SETTER(Uint8, uint8_t)
+DATA_VIEW_SETTER(Int8, int8_t)
+DATA_VIEW_SETTER(Uint16, uint16_t)
+DATA_VIEW_SETTER(Int16, int16_t)
+DATA_VIEW_SETTER(Uint32, uint32_t)
+DATA_VIEW_SETTER(Int32, int32_t)
+DATA_VIEW_SETTER(Float32, float)
+DATA_VIEW_SETTER(Float64, double)
+
+#undef DATA_VIEW_SETTER
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/uri.h b/deps/v8/src/runtime/runtime-uri.cc
index bb5140b8c0..10e21be05f 100644
--- a/deps/v8/src/uri.h
+++ b/deps/v8/src/runtime/runtime-uri.cc
@@ -1,20 +1,20 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_URI_H_
-#define V8_URI_H_
-
#include "src/v8.h"
+#include "src/arguments.h"
#include "src/conversions.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
#include "src/string-search.h"
#include "src/utils.h"
+
namespace v8 {
namespace internal {
-
template <typename Char>
static INLINE(Vector<const Char> GetCharVector(Handle<String> string));
@@ -22,7 +22,7 @@ static INLINE(Vector<const Char> GetCharVector(Handle<String> string));
template <>
Vector<const uint8_t> GetCharVector(Handle<String> string) {
String::FlatContent flat = string->GetFlatContent();
- DCHECK(flat.IsAscii());
+ DCHECK(flat.IsOneByte());
return flat.ToOneByteVector();
}
@@ -37,43 +37,42 @@ Vector<const uc16> GetCharVector(Handle<String> string) {
class URIUnescape : public AllStatic {
public:
- template<typename Char>
+ template <typename Char>
MUST_USE_RESULT static MaybeHandle<String> Unescape(Isolate* isolate,
Handle<String> source);
private:
static const signed char kHexValue['g'];
- template<typename Char>
- MUST_USE_RESULT static MaybeHandle<String> UnescapeSlow(
- Isolate* isolate, Handle<String> string, int start_index);
+ template <typename Char>
+ MUST_USE_RESULT static MaybeHandle<String> UnescapeSlow(Isolate* isolate,
+ Handle<String> string,
+ int start_index);
static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
template <typename Char>
- static INLINE(int UnescapeChar(Vector<const Char> vector,
- int i,
- int length,
+ static INLINE(int UnescapeChar(Vector<const Char> vector, int i, int length,
int* step));
};
const signed char URIUnescape::kHexValue[] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15 };
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15};
-template<typename Char>
+template <typename Char>
MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate,
Handle<String> source) {
int index;
- { DisallowHeapAllocation no_allocation;
- StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
+ {
+ DisallowHeapAllocation no_allocation;
+ StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
index = search.Search(GetCharVector<Char>(source), 0);
if (index < 0) return source;
}
@@ -82,18 +81,20 @@ MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate,
template <typename Char>
-MaybeHandle<String> URIUnescape::UnescapeSlow(
- Isolate* isolate, Handle<String> string, int start_index) {
+MaybeHandle<String> URIUnescape::UnescapeSlow(Isolate* isolate,
+ Handle<String> string,
+ int start_index) {
bool one_byte = true;
int length = string->length();
int unescaped_length = 0;
- { DisallowHeapAllocation no_allocation;
+ {
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; unescaped_length++) {
int step;
if (UnescapeChar(vector, i, length, &step) >
- String::kMaxOneByteCharCode) {
+ String::kMaxOneByteCharCode) {
one_byte = false;
}
i += step;
@@ -108,8 +109,9 @@ MaybeHandle<String> URIUnescape::UnescapeSlow(
Handle<String> second_part;
DCHECK(unescaped_length <= String::kMaxLength);
if (one_byte) {
- Handle<SeqOneByteString> dest = isolate->factory()->NewRawOneByteString(
- unescaped_length).ToHandleChecked();
+ Handle<SeqOneByteString> dest = isolate->factory()
+ ->NewRawOneByteString(unescaped_length)
+ .ToHandleChecked();
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -120,8 +122,9 @@ MaybeHandle<String> URIUnescape::UnescapeSlow(
}
second_part = dest;
} else {
- Handle<SeqTwoByteString> dest = isolate->factory()->NewRawTwoByteString(
- unescaped_length).ToHandleChecked();
+ Handle<SeqTwoByteString> dest = isolate->factory()
+ ->NewRawTwoByteString(unescaped_length)
+ .ToHandleChecked();
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -148,26 +151,18 @@ int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) {
template <typename Char>
-int URIUnescape::UnescapeChar(Vector<const Char> vector,
- int i,
- int length,
+int URIUnescape::UnescapeChar(Vector<const Char> vector, int i, int length,
int* step) {
uint16_t character = vector[i];
int32_t hi = 0;
int32_t lo = 0;
- if (character == '%' &&
- i <= length - 6 &&
- vector[i + 1] == 'u' &&
- (hi = TwoDigitHex(vector[i + 2],
- vector[i + 3])) != -1 &&
- (lo = TwoDigitHex(vector[i + 4],
- vector[i + 5])) != -1) {
+ if (character == '%' && i <= length - 6 && vector[i + 1] == 'u' &&
+ (hi = TwoDigitHex(vector[i + 2], vector[i + 3])) != -1 &&
+ (lo = TwoDigitHex(vector[i + 4], vector[i + 5])) != -1) {
*step = 6;
return (hi << 8) + lo;
- } else if (character == '%' &&
- i <= length - 3 &&
- (lo = TwoDigitHex(vector[i + 1],
- vector[i + 2])) != -1) {
+ } else if (character == '%' && i <= length - 3 &&
+ (lo = TwoDigitHex(vector[i + 1], vector[i + 2])) != -1) {
*step = 3;
return lo;
} else {
@@ -179,7 +174,7 @@ int URIUnescape::UnescapeChar(Vector<const Char> vector,
class URIEscape : public AllStatic {
public:
- template<typename Char>
+ template <typename Char>
MUST_USE_RESULT static MaybeHandle<String> Escape(Isolate* isolate,
Handle<String> string);
@@ -206,31 +201,27 @@ const char URIEscape::kHexChars[] = "0123456789ABCDEF";
// }
const char URIEscape::kNotEscaped[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-
-template<typename Char>
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+
+template <typename Char>
MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
DCHECK(string->IsFlat());
int escaped_length = 0;
int length = string->length();
- { DisallowHeapAllocation no_allocation;
+ {
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
@@ -243,7 +234,7 @@ MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
}
// We don't allow strings that are longer than a maximal length.
- DCHECK(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
+ DCHECK(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
@@ -253,30 +244,30 @@ MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
Handle<SeqOneByteString> dest;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, dest,
- isolate->factory()->NewRawOneByteString(escaped_length),
+ isolate, dest, isolate->factory()->NewRawOneByteString(escaped_length),
String);
int dest_position = 0;
- { DisallowHeapAllocation no_allocation;
+ {
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
if (c >= 256) {
dest->SeqOneByteStringSet(dest_position, '%');
- dest->SeqOneByteStringSet(dest_position+1, 'u');
- dest->SeqOneByteStringSet(dest_position+2, kHexChars[c >> 12]);
- dest->SeqOneByteStringSet(dest_position+3, kHexChars[(c >> 8) & 0xf]);
- dest->SeqOneByteStringSet(dest_position+4, kHexChars[(c >> 4) & 0xf]);
- dest->SeqOneByteStringSet(dest_position+5, kHexChars[c & 0xf]);
+ dest->SeqOneByteStringSet(dest_position + 1, 'u');
+ dest->SeqOneByteStringSet(dest_position + 2, kHexChars[c >> 12]);
+ dest->SeqOneByteStringSet(dest_position + 3, kHexChars[(c >> 8) & 0xf]);
+ dest->SeqOneByteStringSet(dest_position + 4, kHexChars[(c >> 4) & 0xf]);
+ dest->SeqOneByteStringSet(dest_position + 5, kHexChars[c & 0xf]);
dest_position += 6;
} else if (IsNotEscaped(c)) {
dest->SeqOneByteStringSet(dest_position, c);
dest_position++;
} else {
dest->SeqOneByteStringSet(dest_position, '%');
- dest->SeqOneByteStringSet(dest_position+1, kHexChars[c >> 4]);
- dest->SeqOneByteStringSet(dest_position+2, kHexChars[c & 0xf]);
+ dest->SeqOneByteStringSet(dest_position + 1, kHexChars[c >> 4]);
+ dest->SeqOneByteStringSet(dest_position + 2, kHexChars[c & 0xf]);
dest_position += 3;
}
}
@@ -285,6 +276,34 @@ MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
return dest;
}
-} } // namespace v8::internal
-#endif // V8_URI_H_
+RUNTIME_FUNCTION(Runtime_URIEscape) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> string = String::Flatten(source);
+ DCHECK(string->IsFlat());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, string->IsOneByteRepresentationUnderneath()
+ ? URIEscape::Escape<uint8_t>(isolate, source)
+ : URIEscape::Escape<uc16>(isolate, source));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_URIUnescape) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> string = String::Flatten(source);
+ DCHECK(string->IsFlat());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, string->IsOneByteRepresentationUnderneath()
+ ? URIUnescape::Unescape<uint8_t>(isolate, source)
+ : URIUnescape::Unescape<uc16>(isolate, source));
+ return *result;
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
new file mode 100644
index 0000000000..a7e74ac316
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -0,0 +1,146 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_RUNTIME_UTILS_H_
+#define V8_RUNTIME_UTILS_H_
+
+
+namespace v8 {
+namespace internal {
+
+#define RUNTIME_ASSERT(value) \
+ if (!(value)) return isolate->ThrowIllegalOperation();
+
+#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
+ if (!(value)) { \
+ isolate->ThrowIllegalOperation(); \
+ return MaybeHandle<T>(); \
+ }
+
+// Cast the given object to a value of the specified type and store
+// it in a variable with the given name. If the object is not of the
+// expected type call IllegalOperation and return.
+#define CONVERT_ARG_CHECKED(Type, name, index) \
+ RUNTIME_ASSERT(args[index]->Is##Type()); \
+ Type* name = Type::cast(args[index]);
+
+#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
+ RUNTIME_ASSERT(args[index]->Is##Type()); \
+ Handle<Type> name = args.at<Type>(index);
+
+#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ Handle<Object> name = args.at<Object>(index);
+
+// Cast the given object to a boolean and store it in a variable with
+// the given name. If the object is not a boolean call IllegalOperation
+// and return.
+#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsBoolean()); \
+ bool name = args[index]->IsTrue();
+
+// Cast the given argument to a Smi and store its value in an int variable
+// with the given name. If the argument is not a Smi call IllegalOperation
+// and return.
+#define CONVERT_SMI_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ int name = args.smi_at(index);
+
+// Cast the given argument to a double and store it in a variable with
+// the given name. If the argument is not a number (as opposed to
+// the number not-a-number) call IllegalOperation and return.
+#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ double name = args.number_at(index);
+
+// Call the specified converter on the object *comand store the result in
+// a variable of the specified type with the given name. If the
+// object is not a Number call IllegalOperation and return.
+#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
+ RUNTIME_ASSERT(obj->IsNumber()); \
+ type name = NumberTo##Type(obj);
+
+
+// Cast the given argument to PropertyDetails and store its value in a
+// variable with the given name. If the argument is not a Smi call
+// IllegalOperation and return.
+#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
+
+
+// Assert that the given argument has a valid value for a StrictMode
+// and store it in a StrictMode variable with the given name.
+#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ RUNTIME_ASSERT(args.smi_at(index) == STRICT || \
+ args.smi_at(index) == SLOPPY); \
+ StrictMode name = static_cast<StrictMode>(args.smi_at(index));
+
+
+// Assert that the given argument is a number within the Int32 range
+// and convert it to int32_t. If the argument is not an Int32 call
+// IllegalOperation and return.
+#define CONVERT_INT32_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ int32_t name = 0; \
+ RUNTIME_ASSERT(args[index]->ToInt32(&name));
+
+
+// A mechanism to return a pair of Object pointers in registers (if possible).
+// How this is achieved is calling convention-dependent.
+// All currently supported x86 compiles uses calling conventions that are cdecl
+// variants where a 64-bit value is returned in two 32-bit registers
+// (edx:eax on ia32, r1:r0 on ARM).
+// In AMD-64 calling convention a struct of two pointers is returned in rdx:rax.
+// In Win64 calling convention, a struct of two pointers is returned in memory,
+// allocated by the caller, and passed as a pointer in a hidden first parameter.
+#ifdef V8_HOST_ARCH_64_BIT
+struct ObjectPair {
+ Object* x;
+ Object* y;
+};
+
+
+static inline ObjectPair MakePair(Object* x, Object* y) {
+ ObjectPair result = {x, y};
+ // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
+ // In Win64 they are assigned to a hidden first argument.
+ return result;
+}
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+// For x32 a 128-bit struct return is done as rax and rdx from the ObjectPair
+// are used in the full codegen and Crankshaft compiler. An alternative is
+// using uint64_t and modifying full codegen and Crankshaft compiler.
+struct ObjectPair {
+ Object* x;
+ uint32_t x_upper;
+ Object* y;
+ uint32_t y_upper;
+};
+
+
+static inline ObjectPair MakePair(Object* x, Object* y) {
+ ObjectPair result = {x, 0, y, 0};
+ // Pointers x and y returned in rax and rdx, in x32-abi.
+ return result;
+}
+#else
+typedef uint64_t ObjectPair;
+static inline ObjectPair MakePair(Object* x, Object* y) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ return reinterpret_cast<uint32_t>(x) |
+ (reinterpret_cast<ObjectPair>(y) << 32);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ return reinterpret_cast<uint32_t>(y) |
+ (reinterpret_cast<ObjectPair>(x) << 32);
+#else
+#error Unknown endianness
+#endif
+}
+#endif
+}
+} // namespace v8::internal
+
+#endif // V8_RUNTIME_UTILS_H_
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 1fbedc6adc..427b821745 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -11,6 +11,7 @@
#include "src/allocation-site-scopes.h"
#include "src/api.h"
#include "src/arguments.h"
+#include "src/bailout-reason.h"
#include "src/base/cpu.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -27,134 +28,48 @@
#include "src/full-codegen.h"
#include "src/global-handles.h"
#include "src/isolate-inl.h"
-#include "src/json-parser.h"
-#include "src/json-stringifier.h"
-#include "src/jsregexp-inl.h"
-#include "src/jsregexp.h"
#include "src/liveedit.h"
#include "src/misc-intrinsics.h"
#include "src/parser.h"
#include "src/prototype.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
#include "src/smart-pointers.h"
-#include "src/string-search.h"
-#include "src/stub-cache.h"
-#include "src/uri.h"
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
-#include "third_party/fdlibm/fdlibm.h"
-
-#ifdef V8_I18N_SUPPORT
-#include "src/i18n.h"
-#include "unicode/brkiter.h"
-#include "unicode/calendar.h"
-#include "unicode/coll.h"
-#include "unicode/curramt.h"
-#include "unicode/datefmt.h"
-#include "unicode/dcfmtsym.h"
-#include "unicode/decimfmt.h"
-#include "unicode/dtfmtsym.h"
-#include "unicode/dtptngen.h"
-#include "unicode/locid.h"
-#include "unicode/numfmt.h"
-#include "unicode/numsys.h"
-#include "unicode/rbbi.h"
-#include "unicode/smpdtfmt.h"
-#include "unicode/timezone.h"
-#include "unicode/uchar.h"
-#include "unicode/ucol.h"
-#include "unicode/ucurr.h"
-#include "unicode/uloc.h"
-#include "unicode/unum.h"
-#include "unicode/uversion.h"
-#endif
-#ifndef _STLP_VENDOR_CSTD
-// STLPort doesn't import fpclassify and isless into the std namespace.
-using std::fpclassify;
-using std::isless;
-#endif
namespace v8 {
namespace internal {
+// Header of runtime functions.
+#define F(name, number_of_args, result_size) \
+ Object* Runtime_##name(int args_length, Object** args_object, \
+ Isolate* isolate);
+
+#define P(name, number_of_args, result_size) \
+ ObjectPair Runtime_##name(int args_length, Object** args_object, \
+ Isolate* isolate);
+
+#define I(name, number_of_args, result_size) \
+ Object* RuntimeReference_##name(int args_length, Object** args_object, \
+ Isolate* isolate);
+
+RUNTIME_FUNCTION_LIST_RETURN_OBJECT(F)
+RUNTIME_FUNCTION_LIST_RETURN_PAIR(P)
+INLINE_OPTIMIZED_FUNCTION_LIST(F)
+INLINE_FUNCTION_LIST(I)
-#define RUNTIME_ASSERT(value) \
- if (!(value)) return isolate->ThrowIllegalOperation();
-
-#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
- if (!(value)) { \
- isolate->ThrowIllegalOperation(); \
- return MaybeHandle<T>(); \
- }
-
-// Cast the given object to a value of the specified type and store
-// it in a variable with the given name. If the object is not of the
-// expected type call IllegalOperation and return.
-#define CONVERT_ARG_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
- Type* name = Type::cast(args[index]);
-
-#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
- Handle<Type> name = args.at<Type>(index);
-
-#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
- Handle<Object> name = args.at<Object>(index);
-
-// Cast the given object to a boolean and store it in a variable with
-// the given name. If the object is not a boolean call IllegalOperation
-// and return.
-#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsBoolean()); \
- bool name = args[index]->IsTrue();
-
-// Cast the given argument to a Smi and store its value in an int variable
-// with the given name. If the argument is not a Smi call IllegalOperation
-// and return.
-#define CONVERT_SMI_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- int name = args.smi_at(index);
-
-// Cast the given argument to a double and store it in a variable with
-// the given name. If the argument is not a number (as opposed to
-// the number not-a-number) call IllegalOperation and return.
-#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
- double name = args.number_at(index);
-
-// Call the specified converter on the object *comand store the result in
-// a variable of the specified type with the given name. If the
-// object is not a Number call IllegalOperation and return.
-#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
- type name = NumberTo##Type(obj);
-
-
-// Cast the given argument to PropertyDetails and store its value in a
-// variable with the given name. If the argument is not a Smi call
-// IllegalOperation and return.
-#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-
-
-// Assert that the given argument has a valid value for a StrictMode
-// and store it in a StrictMode variable with the given name.
-#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(args.smi_at(index) == STRICT || \
- args.smi_at(index) == SLOPPY); \
- StrictMode name = static_cast<StrictMode>(args.smi_at(index));
+#undef I
+#undef F
+#undef P
static Handle<Map> ComputeObjectLiteralMap(
- Handle<Context> context,
- Handle<FixedArray> constant_properties,
+ Handle<Context> context, Handle<FixedArray> constant_properties,
bool* is_result_from_cache) {
Isolate* isolate = context->GetIsolate();
int properties_length = constant_properties->length();
@@ -199,21 +114,18 @@ static Handle<Map> ComputeObjectLiteralMap(
return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
}
*is_result_from_cache = false;
- return Map::Create(handle(context->object_function()), number_of_properties);
+ return Map::Create(isolate, number_of_properties);
}
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
+ Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> constant_properties);
MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> constant_properties,
- bool should_have_fast_elements,
+ Isolate* isolate, Handle<FixedArray> literals,
+ Handle<FixedArray> constant_properties, bool should_have_fast_elements,
bool has_function_literal) {
// Get the native context from the literals array. This is the
// context in which the function was created and we use the object
@@ -230,10 +142,9 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// not the same (which is the common case).
bool is_result_from_cache = false;
Handle<Map> map = has_function_literal
- ? Handle<Map>(context->object_function()->initial_map())
- : ComputeObjectLiteralMap(context,
- constant_properties,
- &is_result_from_cache);
+ ? Handle<Map>(context->object_function()->initial_map())
+ : ComputeObjectLiteralMap(context, constant_properties,
+ &is_result_from_cache);
PretenureFlag pretenure_flag =
isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
@@ -251,20 +162,19 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
bool should_normalize = should_transform || has_function_literal;
if (should_normalize) {
// TODO(verwaest): We might not want to ever normalize here.
- JSObject::NormalizeProperties(
- boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
+ JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
+ length / 2);
}
// TODO(verwaest): Support tracking representations in the boilerplate.
- for (int index = 0; index < length; index +=2) {
- Handle<Object> key(constant_properties->get(index+0), isolate);
- Handle<Object> value(constant_properties->get(index+1), isolate);
+ for (int index = 0; index < length; index += 2) {
+ Handle<Object> key(constant_properties->get(index + 0), isolate);
+ Handle<Object> value(constant_properties->get(index + 1), isolate);
if (value->IsFixedArray()) {
// The value contains the constant_properties of a
// simple object or array literal.
Handle<FixedArray> array = Handle<FixedArray>::cast(value);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- CreateLiteralBoilerplate(isolate, literals, array),
+ isolate, value, CreateLiteralBoilerplate(isolate, literals, array),
Object);
}
MaybeHandle<Object> maybe_result;
@@ -291,7 +201,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
DCHECK(key->IsNumber());
double num = key->Number();
char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ Vector<char> buffer(arr, arraysize(arr));
const char* str = DoubleToCString(num, buffer);
Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str);
maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name,
@@ -309,8 +219,8 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// computed properties have been assigned so that we can generate
// constant function properties.
if (should_transform && !has_function_literal) {
- JSObject::MigrateSlowToFast(
- boilerplate, boilerplate->map()->unused_property_fields());
+ JSObject::MigrateSlowToFast(boilerplate,
+ boilerplate->map()->unused_property_fields());
}
return boilerplate;
@@ -318,9 +228,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
MUST_USE_RESULT static MaybeHandle<Object> TransitionElements(
- Handle<Object> object,
- ElementsKind to_kind,
- Isolate* isolate) {
+ Handle<Object> object, ElementsKind to_kind, Isolate* isolate) {
HandleScope scope(isolate);
if (!object->IsJSObject()) {
isolate->ThrowIllegalOperation();
@@ -338,8 +246,7 @@ MUST_USE_RESULT static MaybeHandle<Object> TransitionElements(
MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
+ Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> elements) {
// Create the JSArray.
Handle<JSFunction> constructor(
@@ -356,7 +263,8 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(elements->get(1)));
- { DisallowHeapAllocation no_gc;
+ {
+ DisallowHeapAllocation no_gc;
DCHECK(IsFastElementsKind(constant_elements_kind));
Context* native_context = isolate->context()->native_context();
Object* maps_array = native_context->js_array_maps();
@@ -371,9 +279,8 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedDoubleArray>::cast(constant_elements_values));
} else {
DCHECK(IsFastSmiOrObjectElementsKind(constant_elements_kind));
- const bool is_cow =
- (constant_elements_values->map() ==
- isolate->heap()->fixed_cow_array_map());
+ const bool is_cow = (constant_elements_values->map() ==
+ isolate->heap()->fixed_cow_array_map());
if (is_cow) {
copied_elements_values = constant_elements_values;
#if DEBUG
@@ -396,8 +303,7 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CreateLiteralBoilerplate(isolate, literals, fa),
+ isolate, result, CreateLiteralBoilerplate(isolate, literals, fa),
Object);
fixed_array_values_copy->set(i, *result);
}
@@ -413,27 +319,19 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate,
- Handle<FixedArray> literals,
- Handle<FixedArray> array) {
+ Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
const bool kHasNoFunctionLiteral = false;
switch (CompileTimeValue::GetLiteralType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate,
- literals,
- elements,
- true,
+ return CreateObjectLiteralBoilerplate(isolate, literals, elements, true,
kHasNoFunctionLiteral);
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate,
- literals,
- elements,
- false,
+ return CreateObjectLiteralBoilerplate(isolate, literals, elements, false,
kHasNoFunctionLiteral);
case CompileTimeValue::ARRAY_LITERAL:
- return Runtime::CreateArrayLiteralBoilerplate(
- isolate, literals, elements);
+ return Runtime::CreateArrayLiteralBoilerplate(isolate, literals,
+ elements);
default:
UNREACHABLE();
return MaybeHandle<Object>();
@@ -461,33 +359,29 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
Handle<Object> raw_boilerplate;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_boilerplate,
- CreateObjectLiteralBoilerplate(
- isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal));
+ CreateObjectLiteralBoilerplate(isolate, literals, constant_properties,
+ should_have_fast_elements,
+ has_function_literal));
boilerplate = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate);
site = creation_context.EnterNewScope();
RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::DeepWalk(boilerplate, &creation_context));
+ isolate, JSObject::DeepWalk(boilerplate, &creation_context));
creation_context.ExitScope(site, boilerplate);
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
- boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
- isolate);
+ boilerplate =
+ Handle<JSObject>(JSObject::cast(site->transition_info()), isolate);
}
AllocationSiteUsageContext usage_context(isolate, site, true);
usage_context.EnterNewScope();
- MaybeHandle<Object> maybe_copy = JSObject::DeepCopy(
- boilerplate, &usage_context);
+ MaybeHandle<Object> maybe_copy =
+ JSObject::DeepCopy(boilerplate, &usage_context);
usage_context.ExitScope(site, boilerplate);
Handle<Object> copy;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, copy, maybe_copy);
@@ -496,9 +390,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
- Isolate* isolate,
- Handle<FixedArray> literals,
- int literals_index,
+ Isolate* isolate, Handle<FixedArray> literals, int literals_index,
Handle<FixedArray> elements) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->get(literals_index), isolate);
@@ -529,12 +421,12 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate,
- Handle<FixedArray> literals,
- int literals_index,
- Handle<FixedArray> elements,
- int flags) {
- RUNTIME_ASSERT_HANDLIFIED(literals_index >= 0 &&
- literals_index < literals->length(), JSObject);
+ Handle<FixedArray> literals,
+ int literals_index,
+ Handle<FixedArray> elements,
+ int flags) {
+ RUNTIME_ASSERT_HANDLIFIED(
+ literals_index >= 0 && literals_index < literals->length(), JSObject);
Handle<AllocationSite> site;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, site,
@@ -548,8 +440,8 @@ static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate,
JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0
? JSObject::kNoHints
: JSObject::kObjectIsShallow;
- MaybeHandle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context,
- hints);
+ MaybeHandle<JSObject> copy =
+ JSObject::DeepCopy(boilerplate, &usage_context, hints);
usage_context.ExitScope(site, boilerplate);
return copy;
}
@@ -564,9 +456,9 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
- flags));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, CreateArrayLiteralImpl(isolate, literals, literals_index,
+ elements, flags));
return *result;
}
@@ -579,9 +471,10 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
- ArrayLiteral::kShallowElements));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ ArrayLiteral::kShallowElements));
return *result;
}
@@ -619,7 +512,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateOwnSymbol) {
}
-RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateOwnSymbol) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
@@ -635,6 +528,7 @@ RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
DCHECK(symbol->IsUndefined());
symbol = isolate->factory()->NewPrivateSymbol();
Handle<Symbol>::cast(symbol)->set_name(*name);
+ Handle<Symbol>::cast(symbol)->set_is_own(true);
JSObject::SetProperty(Handle<JSObject>::cast(privates), name, symbol,
STRICT).Assert();
}
@@ -692,8 +586,8 @@ RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, construct_trap, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 3);
if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
- return *isolate->factory()->NewJSFunctionProxy(
- handler, call_trap, construct_trap, prototype);
+ return *isolate->factory()->NewJSFunctionProxy(handler, call_trap,
+ construct_trap, prototype);
}
@@ -746,1093 +640,6 @@ RUNTIME_FUNCTION(Runtime_Fix) {
}
-void Runtime::FreeArrayBuffer(Isolate* isolate,
- JSArrayBuffer* phantom_array_buffer) {
- if (phantom_array_buffer->should_be_freed()) {
- DCHECK(phantom_array_buffer->is_external());
- free(phantom_array_buffer->backing_store());
- }
- if (phantom_array_buffer->is_external()) return;
-
- size_t allocated_length = NumberToSize(
- isolate, phantom_array_buffer->byte_length());
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<int64_t>(allocated_length));
- CHECK(V8::ArrayBufferAllocator() != NULL);
- V8::ArrayBufferAllocator()->Free(
- phantom_array_buffer->backing_store(),
- allocated_length);
-}
-
-
-void Runtime::SetupArrayBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- bool is_external,
- void* data,
- size_t allocated_length) {
- DCHECK(array_buffer->GetInternalFieldCount() ==
- v8::ArrayBuffer::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
- array_buffer->SetInternalField(i, Smi::FromInt(0));
- }
- array_buffer->set_backing_store(data);
- array_buffer->set_flag(Smi::FromInt(0));
- array_buffer->set_is_external(is_external);
-
- Handle<Object> byte_length =
- isolate->factory()->NewNumberFromSize(allocated_length);
- CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
- array_buffer->set_byte_length(*byte_length);
-
- array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
- isolate->heap()->set_array_buffers_list(*array_buffer);
- array_buffer->set_weak_first_view(isolate->heap()->undefined_value());
-}
-
-
-bool Runtime::SetupArrayBufferAllocatingData(
- Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- size_t allocated_length,
- bool initialize) {
- void* data;
- CHECK(V8::ArrayBufferAllocator() != NULL);
- if (allocated_length != 0) {
- if (initialize) {
- data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
- } else {
- data =
- V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length);
- }
- if (data == NULL) return false;
- } else {
- data = NULL;
- }
-
- SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(allocated_length);
-
- return true;
-}
-
-
-void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
- Isolate* isolate = array_buffer->GetIsolate();
- for (Handle<Object> view_obj(array_buffer->weak_first_view(), isolate);
- !view_obj->IsUndefined();) {
- Handle<JSArrayBufferView> view(JSArrayBufferView::cast(*view_obj));
- if (view->IsJSTypedArray()) {
- JSTypedArray::cast(*view)->Neuter();
- } else if (view->IsJSDataView()) {
- JSDataView::cast(*view)->Neuter();
- } else {
- UNREACHABLE();
- }
- view_obj = handle(view->weak_next(), isolate);
- }
- array_buffer->Neuter();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
- if (!holder->byte_length()->IsUndefined()) {
- // ArrayBuffer is already initialized; probably a fuzz test.
- return *holder;
- }
- size_t allocated_length = 0;
- if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) {
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_array_buffer_length",
- HandleVector<Object>(NULL, 0)));
- }
- if (!Runtime::SetupArrayBufferAllocatingData(isolate,
- holder, allocated_length)) {
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_array_buffer_length",
- HandleVector<Object>(NULL, 0)));
- }
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
- return holder->byte_length();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
- RUNTIME_ASSERT(!source.is_identical_to(target));
- size_t start = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
- size_t target_length = NumberToSize(isolate, target->byte_length());
-
- if (target_length == 0) return isolate->heap()->undefined_value();
-
- size_t source_byte_length = NumberToSize(isolate, source->byte_length());
- RUNTIME_ASSERT(start <= source_byte_length);
- RUNTIME_ASSERT(source_byte_length - start >= target_length);
- uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
- uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
- CopyBytes(target_data, source_data + start, target_length);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(object->IsJSArrayBufferView());
-}
-
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
- if (array_buffer->backing_store() == NULL) {
- CHECK(Smi::FromInt(0) == array_buffer->byte_length());
- return isolate->heap()->undefined_value();
- }
- DCHECK(!array_buffer->is_external());
- void* backing_store = array_buffer->backing_store();
- size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
- array_buffer->set_is_external(true);
- Runtime::NeuterArrayBuffer(array_buffer);
- V8::ArrayBufferAllocator()->Free(backing_store, byte_length);
- return isolate->heap()->undefined_value();
-}
-
-
-void Runtime::ArrayIdToTypeAndSize(
- int arrayId,
- ExternalArrayType* array_type,
- ElementsKind* external_elements_kind,
- ElementsKind* fixed_elements_kind,
- size_t* element_size) {
- switch (arrayId) {
-#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
- case ARRAY_ID_##TYPE: \
- *array_type = kExternal##Type##Array; \
- *external_elements_kind = EXTERNAL_##TYPE##_ELEMENTS; \
- *fixed_elements_kind = TYPE##_ELEMENTS; \
- *element_size = size; \
- break;
-
- TYPED_ARRAYS(ARRAY_ID_CASE)
-#undef ARRAY_ID_CASE
-
- default:
- UNREACHABLE();
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
- CONVERT_SMI_ARG_CHECKED(arrayId, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset_object, 3);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4);
-
- RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
- arrayId <= Runtime::ARRAY_ID_LAST);
-
- ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
- size_t element_size = 1; // Bogus initialization.
- ElementsKind external_elements_kind =
- EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
- ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId,
- &array_type,
- &external_elements_kind,
- &fixed_elements_kind,
- &element_size);
- RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
-
- size_t byte_offset = 0;
- size_t byte_length = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset_object, &byte_offset));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length_object, &byte_length));
-
- if (maybe_buffer->IsJSArrayBuffer()) {
- Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
- size_t array_buffer_byte_length =
- NumberToSize(isolate, buffer->byte_length());
- RUNTIME_ASSERT(byte_offset <= array_buffer_byte_length);
- RUNTIME_ASSERT(array_buffer_byte_length - byte_offset >= byte_length);
- } else {
- RUNTIME_ASSERT(maybe_buffer->IsNull());
- }
-
- RUNTIME_ASSERT(byte_length % element_size == 0);
- size_t length = byte_length / element_size;
-
- if (length > static_cast<unsigned>(Smi::kMaxValue)) {
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_typed_array_length",
- HandleVector<Object>(NULL, 0)));
- }
-
- // All checks are done, now we can modify objects.
-
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
- }
- Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
- holder->set_length(*length_obj);
- holder->set_byte_offset(*byte_offset_object);
- holder->set_byte_length(*byte_length_object);
-
- if (!maybe_buffer->IsNull()) {
- Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
- holder->set_buffer(*buffer);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
-
- Handle<ExternalArray> elements =
- isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- Handle<Map> map =
- JSObject::GetElementsTransitionMap(holder, external_elements_kind);
- JSObject::SetMapAndElements(holder, map, elements);
- DCHECK(IsExternalArrayElementsKind(holder->map()->elements_kind()));
- } else {
- holder->set_buffer(Smi::FromInt(0));
- holder->set_weak_next(isolate->heap()->undefined_value());
- Handle<FixedTypedArrayBase> elements =
- isolate->factory()->NewFixedTypedArray(
- static_cast<int>(length), array_type);
- holder->set_elements(*elements);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Initializes a typed array from an array-like object.
-// If an array-like object happens to be a typed array of the same type,
-// initializes backing store using memove.
-//
-// Returns true if backing store was initialized or false otherwise.
-RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
- CONVERT_SMI_ARG_CHECKED(arrayId, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 3);
-
- RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
- arrayId <= Runtime::ARRAY_ID_LAST);
-
- ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
- size_t element_size = 1; // Bogus initialization.
- ElementsKind external_elements_kind =
- EXTERNAL_INT8_ELEMENTS; // Bogus intialization.
- ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId,
- &array_type,
- &external_elements_kind,
- &fixed_elements_kind,
- &element_size);
-
- RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
-
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- if (source->IsJSTypedArray() &&
- JSTypedArray::cast(*source)->type() == array_type) {
- length_obj = Handle<Object>(JSTypedArray::cast(*source)->length(), isolate);
- }
- size_t length = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
-
- if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
- (length > (kMaxInt / element_size))) {
- return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_typed_array_length",
- HandleVector<Object>(NULL, 0)));
- }
- size_t byte_length = length * element_size;
-
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
- }
-
- // NOTE: not initializing backing store.
- // We assume that the caller of this function will initialize holder
- // with the loop
- // for(i = 0; i < length; i++) { holder[i] = source[i]; }
- // We assume that the caller of this function is always a typed array
- // constructor.
- // If source is a typed array, this loop will always run to completion,
- // so we are sure that the backing store will be initialized.
- // Otherwise, the indexing operation might throw, so the loop will not
- // run to completion and the typed array might remain partly initialized.
- // However we further assume that the caller of this function is a typed array
- // constructor, and the exception will propagate out of the constructor,
- // therefore uninitialized memory will not be accessible by a user program.
- //
- // TODO(dslomov): revise this once we support subclassing.
-
- if (!Runtime::SetupArrayBufferAllocatingData(
- isolate, buffer, byte_length, false)) {
- return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_array_buffer_length",
- HandleVector<Object>(NULL, 0)));
- }
-
- holder->set_buffer(*buffer);
- holder->set_byte_offset(Smi::FromInt(0));
- Handle<Object> byte_length_obj(
- isolate->factory()->NewNumberFromSize(byte_length));
- holder->set_byte_length(*byte_length_obj);
- holder->set_length(*length_obj);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
-
- Handle<ExternalArray> elements =
- isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()));
- Handle<Map> map = JSObject::GetElementsTransitionMap(
- holder, external_elements_kind);
- JSObject::SetMapAndElements(holder, map, elements);
-
- if (source->IsJSTypedArray()) {
- Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
-
- if (typed_array->type() == holder->type()) {
- uint8_t* backing_store =
- static_cast<uint8_t*>(
- typed_array->GetBuffer()->backing_store());
- size_t source_byte_offset =
- NumberToSize(isolate, typed_array->byte_offset());
- memcpy(
- buffer->backing_store(),
- backing_store + source_byte_offset,
- byte_length);
- return isolate->heap()->true_value();
- }
- }
-
- return isolate->heap()->false_value();
-}
-
-
-#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
- RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \
- return holder->accessor(); \
- }
-
-BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
-BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
-BUFFER_VIEW_GETTER(TypedArray, Length, length)
-BUFFER_VIEW_GETTER(DataView, Buffer, buffer)
-
-#undef BUFFER_VIEW_GETTER
-
-RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
- return *holder->GetBuffer();
-}
-
-
-// Return codes for Runtime_TypedArraySetFastCases.
-// Should be synchronized with typedarray.js natives.
-enum TypedArraySetResultCodes {
- // Set from typed array of the same type.
- // This is processed by TypedArraySetFastCases
- TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE = 0,
- // Set from typed array of the different type, overlapping in memory.
- TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING = 1,
- // Set from typed array of the different type, non-overlapping.
- TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING = 2,
- // Set from non-typed array.
- TYPED_ARRAY_SET_NON_TYPED_ARRAY = 3
-};
-
-
-RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- if (!args[0]->IsJSTypedArray())
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "not_typed_array", HandleVector<Object>(NULL, 0)));
-
- if (!args[1]->IsJSTypedArray())
- return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY);
-
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, source_obj, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset_obj, 2);
-
- Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
- Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
- size_t offset = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset));
- size_t target_length = NumberToSize(isolate, target->length());
- size_t source_length = NumberToSize(isolate, source->length());
- size_t target_byte_length = NumberToSize(isolate, target->byte_length());
- size_t source_byte_length = NumberToSize(isolate, source->byte_length());
- if (offset > target_length ||
- offset + source_length > target_length ||
- offset + source_length < offset) // overflow
- return isolate->Throw(*isolate->factory()->NewRangeError(
- "typed_array_set_source_too_large", HandleVector<Object>(NULL, 0)));
-
- size_t target_offset = NumberToSize(isolate, target->byte_offset());
- size_t source_offset = NumberToSize(isolate, source->byte_offset());
- uint8_t* target_base =
- static_cast<uint8_t*>(
- target->GetBuffer()->backing_store()) + target_offset;
- uint8_t* source_base =
- static_cast<uint8_t*>(
- source->GetBuffer()->backing_store()) + source_offset;
-
- // Typed arrays of the same type: use memmove.
- if (target->type() == source->type()) {
- memmove(target_base + offset * target->element_size(),
- source_base, source_byte_length);
- return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE);
- }
-
- // Typed arrays of different types over the same backing store
- if ((source_base <= target_base &&
- source_base + source_byte_length > target_base) ||
- (target_base <= source_base &&
- target_base + target_byte_length > source_base)) {
- // We do not support overlapping ArrayBuffers
- DCHECK(
- target->GetBuffer()->backing_store() ==
- source->GetBuffer()->backing_store());
- return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
- } else { // Non-overlapping typed arrays
- return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
- DCHECK(args.length() == 0);
- DCHECK_OBJECT_SIZE(
- FLAG_typed_array_max_size_in_heap + FixedTypedArrayBase::kDataOffset);
- return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
-
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
- }
- size_t buffer_length = 0;
- size_t offset = 0;
- size_t length = 0;
- RUNTIME_ASSERT(
- TryNumberToSize(isolate, buffer->byte_length(), &buffer_length));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset, &offset));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length, &length));
-
- // TODO(jkummerow): When we have a "safe numerics" helper class, use it here.
- // Entire range [offset, offset + length] must be in bounds.
- RUNTIME_ASSERT(offset <= buffer_length);
- RUNTIME_ASSERT(offset + length <= buffer_length);
- // No overflow.
- RUNTIME_ASSERT(offset + length >= offset);
-
- holder->set_buffer(*buffer);
- holder->set_byte_offset(*byte_offset);
- holder->set_byte_length(*byte_length);
-
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
-
- return isolate->heap()->undefined_value();
-}
-
-
-inline static bool NeedToFlipBytes(bool is_little_endian) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
- return !is_little_endian;
-#else
- return is_little_endian;
-#endif
-}
-
-
-template<int n>
-inline void CopyBytes(uint8_t* target, uint8_t* source) {
- for (int i = 0; i < n; i++) {
- *(target++) = *(source++);
- }
-}
-
-
-template<int n>
-inline void FlipBytes(uint8_t* target, uint8_t* source) {
- source = source + (n-1);
- for (int i = 0; i < n; i++) {
- *(target++) = *(source--);
- }
-}
-
-
-template<typename T>
-inline static bool DataViewGetValue(
- Isolate* isolate,
- Handle<JSDataView> data_view,
- Handle<Object> byte_offset_obj,
- bool is_little_endian,
- T* result) {
- size_t byte_offset = 0;
- if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
- return false;
- }
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
-
- size_t data_view_byte_offset =
- NumberToSize(isolate, data_view->byte_offset());
- size_t data_view_byte_length =
- NumberToSize(isolate, data_view->byte_length());
- if (byte_offset + sizeof(T) > data_view_byte_length ||
- byte_offset + sizeof(T) < byte_offset) { // overflow
- return false;
- }
-
- union Value {
- T data;
- uint8_t bytes[sizeof(T)];
- };
-
- Value value;
- size_t buffer_offset = data_view_byte_offset + byte_offset;
- DCHECK(
- NumberToSize(isolate, buffer->byte_length())
- >= buffer_offset + sizeof(T));
- uint8_t* source =
- static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
- if (NeedToFlipBytes(is_little_endian)) {
- FlipBytes<sizeof(T)>(value.bytes, source);
- } else {
- CopyBytes<sizeof(T)>(value.bytes, source);
- }
- *result = value.data;
- return true;
-}
-
-
-template<typename T>
-static bool DataViewSetValue(
- Isolate* isolate,
- Handle<JSDataView> data_view,
- Handle<Object> byte_offset_obj,
- bool is_little_endian,
- T data) {
- size_t byte_offset = 0;
- if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
- return false;
- }
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
-
- size_t data_view_byte_offset =
- NumberToSize(isolate, data_view->byte_offset());
- size_t data_view_byte_length =
- NumberToSize(isolate, data_view->byte_length());
- if (byte_offset + sizeof(T) > data_view_byte_length ||
- byte_offset + sizeof(T) < byte_offset) { // overflow
- return false;
- }
-
- union Value {
- T data;
- uint8_t bytes[sizeof(T)];
- };
-
- Value value;
- value.data = data;
- size_t buffer_offset = data_view_byte_offset + byte_offset;
- DCHECK(
- NumberToSize(isolate, buffer->byte_length())
- >= buffer_offset + sizeof(T));
- uint8_t* target =
- static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
- if (NeedToFlipBytes(is_little_endian)) {
- FlipBytes<sizeof(T)>(target, value.bytes);
- } else {
- CopyBytes<sizeof(T)>(target, value.bytes);
- }
- return true;
-}
-
-
-#define DATA_VIEW_GETTER(TypeName, Type, Converter) \
- RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
- CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \
- Type result; \
- if (DataViewGetValue( \
- isolate, holder, offset, is_little_endian, &result)) { \
- return *isolate->factory()->Converter(result); \
- } else { \
- return isolate->Throw(*isolate->factory()->NewRangeError( \
- "invalid_data_view_accessor_offset", \
- HandleVector<Object>(NULL, 0))); \
- } \
- }
-
-DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int8, int8_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Uint16, uint16_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int16, int16_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Uint32, uint32_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int32, int32_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Float32, float, NewNumber)
-DATA_VIEW_GETTER(Float64, double, NewNumber)
-
-#undef DATA_VIEW_GETTER
-
-
-template <typename T>
-static T DataViewConvertValue(double value);
-
-
-template <>
-int8_t DataViewConvertValue<int8_t>(double value) {
- return static_cast<int8_t>(DoubleToInt32(value));
-}
-
-
-template <>
-int16_t DataViewConvertValue<int16_t>(double value) {
- return static_cast<int16_t>(DoubleToInt32(value));
-}
-
-
-template <>
-int32_t DataViewConvertValue<int32_t>(double value) {
- return DoubleToInt32(value);
-}
-
-
-template <>
-uint8_t DataViewConvertValue<uint8_t>(double value) {
- return static_cast<uint8_t>(DoubleToUint32(value));
-}
-
-
-template <>
-uint16_t DataViewConvertValue<uint16_t>(double value) {
- return static_cast<uint16_t>(DoubleToUint32(value));
-}
-
-
-template <>
-uint32_t DataViewConvertValue<uint32_t>(double value) {
- return DoubleToUint32(value);
-}
-
-
-template <>
-float DataViewConvertValue<float>(double value) {
- return static_cast<float>(value);
-}
-
-
-template <>
-double DataViewConvertValue<double>(double value) {
- return value;
-}
-
-
-#define DATA_VIEW_SETTER(TypeName, Type) \
- RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 4); \
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); \
- CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
- Type v = DataViewConvertValue<Type>(value->Number()); \
- if (DataViewSetValue( \
- isolate, holder, offset, is_little_endian, v)) { \
- return isolate->heap()->undefined_value(); \
- } else { \
- return isolate->Throw(*isolate->factory()->NewRangeError( \
- "invalid_data_view_accessor_offset", \
- HandleVector<Object>(NULL, 0))); \
- } \
- }
-
-DATA_VIEW_SETTER(Uint8, uint8_t)
-DATA_VIEW_SETTER(Int8, int8_t)
-DATA_VIEW_SETTER(Uint16, uint16_t)
-DATA_VIEW_SETTER(Int16, int16_t)
-DATA_VIEW_SETTER(Uint32, uint32_t)
-DATA_VIEW_SETTER(Int32, int32_t)
-DATA_VIEW_SETTER(Float32, float)
-DATA_VIEW_SETTER(Float64, double)
-
-#undef DATA_VIEW_SETTER
-
-
-RUNTIME_FUNCTION(Runtime_SetInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
- holder->set_table(*table);
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetAdd) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- table = OrderedHashSet::Add(table, key);
- holder->set_table(*table);
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetHas) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- return isolate->heap()->ToBoolean(table->Contains(key));
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetDelete) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- bool was_present = false;
- table = OrderedHashSet::Remove(table, key, &was_present);
- holder->set_table(*table);
- return isolate->heap()->ToBoolean(was_present);
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetClear) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- table = OrderedHashSet::Clear(table);
- holder->set_table(*table);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetGetSize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- return Smi::FromInt(table->NumberOfElements());
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
- CONVERT_SMI_ARG_CHECKED(kind, 2)
- RUNTIME_ASSERT(kind == JSSetIterator::kKindValues ||
- kind == JSSetIterator::kKindEntries);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
- holder->set_table(*table);
- holder->set_index(Smi::FromInt(0));
- holder->set_kind(Smi::FromInt(kind));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(JSSetIterator, holder, 0);
- CONVERT_ARG_CHECKED(JSArray, value_array, 1);
- return holder->Next(value_array);
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
- holder->set_table(*table);
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapGet) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(key), isolate);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapHas) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(key), isolate);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapDelete) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- bool was_present = false;
- Handle<OrderedHashMap> new_table =
- OrderedHashMap::Remove(table, key, &was_present);
- holder->set_table(*new_table);
- return isolate->heap()->ToBoolean(was_present);
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapClear) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- table = OrderedHashMap::Clear(table);
- holder->set_table(*table);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapSet) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- Handle<OrderedHashMap> new_table = OrderedHashMap::Put(table, key, value);
- holder->set_table(*new_table);
- return *holder;
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapGetSize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- return Smi::FromInt(table->NumberOfElements());
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
- CONVERT_SMI_ARG_CHECKED(kind, 2)
- RUNTIME_ASSERT(kind == JSMapIterator::kKindKeys
- || kind == JSMapIterator::kKindValues
- || kind == JSMapIterator::kKindEntries);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
- holder->set_table(*table);
- holder->set_index(Smi::FromInt(0));
- holder->set_kind(Smi::FromInt(kind));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<FixedArray> entries =
- isolate->factory()->NewFixedArray(table->NumberOfElements() * 2);
- {
- DisallowHeapAllocation no_gc;
- int number_of_non_hole_elements = 0;
- for (int i = 0; i < table->Capacity(); i++) {
- Handle<Object> key(table->KeyAt(i), isolate);
- if (table->IsKey(*key)) {
- entries->set(number_of_non_hole_elements++, *key);
- entries->set(number_of_non_hole_elements++, table->Lookup(key));
- }
- }
- DCHECK_EQ(table->NumberOfElements() * 2, number_of_non_hole_elements);
- }
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(JSMapIterator, holder, 0);
- CONVERT_ARG_CHECKED(JSArray, value_array, 1);
- return holder->Next(value_array);
-}
-
-
-static Handle<JSWeakCollection> WeakCollectionInitialize(
- Isolate* isolate,
- Handle<JSWeakCollection> weak_collection) {
- DCHECK(weak_collection->map()->inobject_properties() == 0);
- Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
- weak_collection->set_table(*table);
- return weak_collection;
-}
-
-
-RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- return *WeakCollectionInitialize(isolate, weak_collection);
-}
-
-
-RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
- Handle<Object> lookup(table->Lookup(key), isolate);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
-}
-
-
-RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
- Handle<Object> lookup(table->Lookup(key), isolate);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(ObjectHashTable::cast(
- weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
- bool was_present = false;
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Remove(table, key, &was_present);
- weak_collection->set_table(*new_table);
- return isolate->heap()->ToBoolean(was_present);
-}
-
-
-RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
- Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
- weak_collection->set_table(*new_table);
- return *weak_collection;
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<FixedArray> values =
- isolate->factory()->NewFixedArray(table->NumberOfElements());
- {
- DisallowHeapAllocation no_gc;
- int number_of_non_hole_elements = 0;
- for (int i = 0; i < table->Capacity(); i++) {
- Handle<Object> key(table->KeyAt(i), isolate);
- if (table->IsKey(*key)) {
- values->set(number_of_non_hole_elements++, *key);
- }
- }
- DCHECK_EQ(table->NumberOfElements(), number_of_non_hole_elements);
- }
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-
RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -1893,8 +700,8 @@ RUNTIME_FUNCTION(Runtime_SetPrototype) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- obj, isolate->factory()->proto_string(), v8::ACCESS_SET)) {
+ !isolate->MayNamedAccess(obj, isolate->factory()->proto_string(),
+ v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(obj, v8::ACCESS_SET);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
@@ -1903,21 +710,18 @@ RUNTIME_FUNCTION(Runtime_SetPrototype) {
Handle<Object> old_value = GetPrototypeSkipHiddenPrototypes(isolate, obj);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetPrototype(obj, prototype, true));
+ isolate, result, JSObject::SetPrototype(obj, prototype, true));
Handle<Object> new_value = GetPrototypeSkipHiddenPrototypes(isolate, obj);
if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(obj, "setPrototype",
- isolate->factory()->proto_string(),
- old_value);
+ JSObject::EnqueueChangeRecord(
+ obj, "setPrototype", isolate->factory()->proto_string(), old_value);
}
return *result;
}
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetPrototype(obj, prototype, true));
+ isolate, result, JSObject::SetPrototype(obj, prototype, true));
return *result;
}
@@ -1975,28 +779,28 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
// Get value if not an AccessorPair.
if (maybe_accessors.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
- Runtime::GetElementOrCharAt(isolate, obj, index), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Runtime::GetElementOrCharAt(isolate, obj, index),
+ Object);
}
} else {
// Get attributes.
- LookupIterator it(obj, name, LookupIterator::CHECK_OWN);
+ LookupIterator it(obj, name, LookupIterator::HIDDEN);
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
if (!maybe.has_value) return MaybeHandle<Object>();
attrs = maybe.value;
if (attrs == ABSENT) return factory->undefined_value();
// Get AccessorPair if present.
- if (it.state() == LookupIterator::PROPERTY &&
- it.property_kind() == LookupIterator::ACCESSOR &&
+ if (it.state() == LookupIterator::ACCESSOR &&
it.GetAccessors()->IsAccessorPair()) {
maybe_accessors = Handle<AccessorPair>::cast(it.GetAccessors());
}
// Get value if not an AccessorPair.
if (maybe_accessors.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Object::GetProperty(&it), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::GetProperty(&it),
+ Object);
}
}
DCHECK(!isolate->has_pending_exception());
@@ -2033,8 +837,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, GetOwnProperty(isolate, obj, name));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ GetOwnProperty(isolate, obj, name));
return *result;
}
@@ -2044,12 +848,103 @@ RUNTIME_FUNCTION(Runtime_PreventExtensions) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::PreventExtensions(obj));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToMethod) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+ Handle<JSFunction> clone = JSFunction::CloneClosure(fun);
+ Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
+ JSObject::SetOwnPropertyIgnoreAttributes(clone, home_object_symbol,
+ home_object, DONT_ENUM).Assert();
+ return *clone;
+}
+
+
+RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
+ DCHECK(args.length() == 0);
+ return isolate->heap()->home_object_symbol();
+}
+
+
+RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
+
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(home_object, name, v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_GET);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ }
+
+ PrototypeIterator iter(isolate, home_object);
+ Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
+ if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
+
+ LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
+ return *result;
+}
+
+
+static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
+ Handle<Object> receiver, Handle<Name> name,
+ Handle<Object> value, StrictMode strict_mode) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(home_object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_SET);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ }
+
+ PrototypeIterator iter(isolate, home_object);
+ Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
+ if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
+
+ LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+ Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::PreventExtensions(obj));
+ isolate, result,
+ Object::SetProperty(&it, value, strict_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED,
+ Object::SUPER_PROPERTY));
return *result;
}
+RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 3);
+
+ return StoreToSuper(isolate, home_object, receiver, name, value, STRICT);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 3);
+
+ return StoreToSuper(isolate, home_object, receiver, name, value, SLOPPY);
+}
+
+
RUNTIME_FUNCTION(Runtime_IsExtensible) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -2064,19 +959,6 @@ RUNTIME_FUNCTION(Runtime_IsExtensible) {
}
-RUNTIME_FUNCTION(Runtime_RegExpCompile) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpImpl::Compile(re, pattern, flags));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_CreateApiFunction) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -2146,10 +1028,9 @@ RUNTIME_FUNCTION(Runtime_EnableAccessChecks) {
static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
HandleScope scope(isolate);
- Handle<Object> args[1] = { name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "var_redeclaration", HandleVector(args, 1));
- return isolate->Throw(*error);
+ Handle<Object> args[1] = {name};
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError("var_redeclaration", HandleVector(args, 1)));
}
@@ -2159,12 +1040,12 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
PropertyAttributes attr, bool is_var,
bool is_const, bool is_function) {
// Do the lookup own properties only, see ES5 erratum.
- LookupIterator it(global, name, LookupIterator::CHECK_HIDDEN);
+ LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.has_value);
- PropertyAttributes old_attributes = maybe.value;
+ if (!maybe.has_value) return isolate->heap()->exception();
- if (old_attributes != ABSENT) {
+ if (it.IsFound()) {
+ PropertyAttributes old_attributes = maybe.value;
// The name was declared before; check for conflicting re-declarations.
if (is_const) return ThrowRedeclarationError(isolate, name);
@@ -2289,7 +1170,7 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
Handle<GlobalObject> global = isolate->global_object();
// Lookup the property as own on the global object.
- LookupIterator it(global, name, LookupIterator::CHECK_HIDDEN);
+ LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.has_value);
PropertyAttributes old_attributes = maybe.value;
@@ -2302,7 +1183,7 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
// Ignore if we can't reconfigure the value.
if ((old_attributes & DONT_DELETE) != 0) {
if ((old_attributes & READ_ONLY) != 0 ||
- it.property_kind() == LookupIterator::ACCESSOR) {
+ it.state() == LookupIterator::ACCESSOR) {
return *value;
}
attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
@@ -2439,7 +1320,7 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
// code can run in between that modifies the declared property.
DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject());
- LookupIterator it(holder, name, LookupIterator::CHECK_HIDDEN);
+ LookupIterator it(holder, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.has_value) return isolate->heap()->exception();
PropertyAttributes old_attributes = maybe.value;
@@ -2447,7 +1328,7 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
// Ignore if we can't reconfigure the value.
if ((old_attributes & DONT_DELETE) != 0) {
if ((old_attributes & READ_ONLY) != 0 ||
- it.property_kind() == LookupIterator::ACCESSOR) {
+ it.state() == LookupIterator::ACCESSOR) {
return *value;
}
attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
@@ -2476,105 +1357,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
}
-RUNTIME_FUNCTION(Runtime_RegExpExecRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- // Due to the way the JS calls are constructed this must be less than the
- // length of a string, i.e. it is always a Smi. We check anyway for security.
- CONVERT_SMI_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(index >= 0);
- RUNTIME_ASSERT(index <= subject->length());
- isolate->counters()->regexp_entry_runtime()->Increment();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- RegExpImpl::Exec(regexp, subject, index, last_match_info));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_SMI_ARG_CHECKED(size, 0);
- RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength);
- CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
- Handle<Map> regexp_map(isolate->native_context()->regexp_result_map());
- Handle<JSObject> object =
- isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED, false);
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- array->set_elements(*elements);
- array->set_length(Smi::FromInt(size));
- // Write in-object properties after the length of the array.
- array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, *index);
- array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, *input);
- return *array;
-}
-
-
-RUNTIME_FUNCTION(Runtime_RegExpInitializeObject) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- // If source is the empty string we set it to "(?:)" instead as
- // suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = isolate->factory()->query_colon_string();
-
- CONVERT_ARG_HANDLE_CHECKED(Object, global, 2);
- if (!global->IsTrue()) global = isolate->factory()->false_value();
-
- CONVERT_ARG_HANDLE_CHECKED(Object, ignoreCase, 3);
- if (!ignoreCase->IsTrue()) ignoreCase = isolate->factory()->false_value();
-
- CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
- if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
-
- Map* map = regexp->map();
- Object* constructor = map->constructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->initial_map() == map) {
- // If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
- // Both true and false are immovable immortal objects so no need for write
- // barrier.
- regexp->InObjectPropertyAtPut(
- JSRegExp::kGlobalFieldIndex, *global, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kIgnoreCaseFieldIndex, *ignoreCase, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kMultilineFieldIndex, *multiline, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
- return *regexp;
- }
-
- // Map has changed, so use generic, but slower, method.
- PropertyAttributes final =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Handle<Object> zero(Smi::FromInt(0), isolate);
- Factory* factory = isolate->factory();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->source_string(), source, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->global_string(), global, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->ignore_case_string(), ignoreCase, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->multiline_string(), multiline, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->last_index_string(), zero, writable).Check();
- return *regexp;
-}
-
-
RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -2589,10 +1371,8 @@ RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
}
-static void InstallBuiltin(Isolate* isolate,
- Handle<JSObject> holder,
- const char* name,
- Builtins::Name builtin_name) {
+static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
+ const char* name, Builtins::Name builtin_name) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<Code> code(isolate->builtins()->builtin(builtin_name));
Handle<JSFunction> optimized =
@@ -2628,9 +1408,8 @@ RUNTIME_FUNCTION(Runtime_IsSloppyModeFunction) {
HandleScope scope(isolate);
Handle<Object> delegate;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, delegate,
- Execution::TryGetFunctionDelegate(
- isolate, Handle<JSReceiver>(callable)));
+ isolate, delegate, Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable)));
callable = JSFunction::cast(*delegate);
}
JSFunction* function = JSFunction::cast(callable);
@@ -2648,9 +1427,8 @@ RUNTIME_FUNCTION(Runtime_GetDefaultReceiver) {
HandleScope scope(isolate);
Handle<Object> delegate;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, delegate,
- Execution::TryGetFunctionDelegate(
- isolate, Handle<JSReceiver>(callable)));
+ isolate, delegate, Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable)));
callable = JSFunction::cast(*delegate);
}
JSFunction* function = JSFunction::cast(callable);
@@ -2666,32 +1444,6 @@ RUNTIME_FUNCTION(Runtime_GetDefaultReceiver) {
}
-RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
-
- // Get the RegExp function from the context in the literals array.
- // This is the RegExp function from the context in which the
- // function was created. We do not use the RegExp function from the
- // current native context because this might be the RegExp function
- // from another context which we should not have access to.
- Handle<JSFunction> constructor =
- Handle<JSFunction>(
- JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
- // Compute the regular expression literal.
- Handle<Object> regexp;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, regexp,
- RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags));
- literals->set(index, *regexp);
- return *regexp;
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionGetName) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -2746,6 +1498,14 @@ RUNTIME_FUNCTION(Runtime_FunctionIsArrow) {
}
+RUNTIME_FUNCTION(Runtime_FunctionIsConciseMethod) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, f, 0);
+ return isolate->heap()->ToBoolean(f->shared()->is_concise_method());
+}
+
+
RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -2915,8 +1675,8 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
- isolate->logger()->LogExistingFunction(
- source_shared, Handle<Code>(source_shared->code()));
+ isolate->logger()->LogExistingFunction(source_shared,
+ Handle<Code>(source_shared->code()));
}
return *target;
@@ -3052,11 +1812,11 @@ RUNTIME_FUNCTION(Runtime_ThrowGeneratorStateError) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
int continuation = generator->continuation();
- const char* message = continuation == JSGeneratorObject::kGeneratorClosed ?
- "generator_finished" : "generator_running";
- Vector< Handle<Object> > argv = HandleVector<Object>(NULL, 0);
- Handle<Object> error = isolate->factory()->NewError(message, argv);
- return isolate->Throw(*error);
+ const char* message = continuation == JSGeneratorObject::kGeneratorClosed
+ ? "generator_finished"
+ : "generator_running";
+ Vector<Handle<Object> > argv = HandleVector<Object>(NULL, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewError(message, argv));
}
@@ -3067,8 +1827,7 @@ RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
// %ObjectFreeze is a fast path and these cases are handled elsewhere.
RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed() &&
- !object->IsJSProxy());
+ !object->map()->is_observed() && !object->IsJSProxy());
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Freeze(object));
@@ -3076,1644 +1835,6 @@ RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
}
-RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
-
- // Flatten the string. If someone wants to get a char at an index
- // in a cons string, it is likely that more indices will be
- // accessed.
- subject = String::Flatten(subject);
-
- if (i >= static_cast<uint32_t>(subject->length())) {
- return isolate->heap()->nan_value();
- }
-
- return Smi::FromInt(subject->Get(i));
-}
-
-
-RUNTIME_FUNCTION(Runtime_CharFromCode) {
- HandleScope handlescope(isolate);
- DCHECK(args.length() == 1);
- if (args[0]->IsNumber()) {
- CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
- code &= 0xffff;
- return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
- }
- return isolate->heap()->empty_string();
-}
-
-
-class FixedArrayBuilder {
- public:
- explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
- : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
- length_(0),
- has_non_smi_elements_(false) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- DCHECK(initial_capacity > 0);
- }
-
- explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
- : array_(backing_store),
- length_(0),
- has_non_smi_elements_(false) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- DCHECK(backing_store->length() > 0);
- }
-
- bool HasCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- return (length >= required_length);
- }
-
- void EnsureCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- if (length < required_length) {
- int new_length = length;
- do {
- new_length *= 2;
- } while (new_length < required_length);
- Handle<FixedArray> extended_array =
- array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
- array_->CopyTo(0, *extended_array, 0, length_);
- array_ = extended_array;
- }
- }
-
- void Add(Object* value) {
- DCHECK(!value->IsSmi());
- DCHECK(length_ < capacity());
- array_->set(length_, value);
- length_++;
- has_non_smi_elements_ = true;
- }
-
- void Add(Smi* value) {
- DCHECK(value->IsSmi());
- DCHECK(length_ < capacity());
- array_->set(length_, value);
- length_++;
- }
-
- Handle<FixedArray> array() {
- return array_;
- }
-
- int length() {
- return length_;
- }
-
- int capacity() {
- return array_->length();
- }
-
- Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- JSArray::SetContent(target_array, array_);
- target_array->set_length(Smi::FromInt(length_));
- return target_array;
- }
-
-
- private:
- Handle<FixedArray> array_;
- int length_;
- bool has_non_smi_elements_;
-};
-
-
-// Forward declarations.
-const int kStringBuilderConcatHelperLengthBits = 11;
-const int kStringBuilderConcatHelperPositionBits = 19;
-
-template <typename schar>
-static inline void StringBuilderConcatHelper(String*,
- schar*,
- FixedArray*,
- int);
-
-typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
- StringBuilderSubstringLength;
-typedef BitField<int,
- kStringBuilderConcatHelperLengthBits,
- kStringBuilderConcatHelperPositionBits>
- StringBuilderSubstringPosition;
-
-
-class ReplacementStringBuilder {
- public:
- ReplacementStringBuilder(Heap* heap,
- Handle<String> subject,
- int estimated_part_count)
- : heap_(heap),
- array_builder_(heap->isolate(), estimated_part_count),
- subject_(subject),
- character_count_(0),
- is_ascii_(subject->IsOneByteRepresentation()) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- DCHECK(estimated_part_count > 0);
- }
-
- static inline void AddSubjectSlice(FixedArrayBuilder* builder,
- int from,
- int to) {
- DCHECK(from >= 0);
- int length = to - from;
- DCHECK(length > 0);
- if (StringBuilderSubstringLength::is_valid(length) &&
- StringBuilderSubstringPosition::is_valid(from)) {
- int encoded_slice = StringBuilderSubstringLength::encode(length) |
- StringBuilderSubstringPosition::encode(from);
- builder->Add(Smi::FromInt(encoded_slice));
- } else {
- // Otherwise encode as two smis.
- builder->Add(Smi::FromInt(-length));
- builder->Add(Smi::FromInt(from));
- }
- }
-
-
- void EnsureCapacity(int elements) {
- array_builder_.EnsureCapacity(elements);
- }
-
-
- void AddSubjectSlice(int from, int to) {
- AddSubjectSlice(&array_builder_, from, to);
- IncrementCharacterCount(to - from);
- }
-
-
- void AddString(Handle<String> string) {
- int length = string->length();
- DCHECK(length > 0);
- AddElement(*string);
- if (!string->IsOneByteRepresentation()) {
- is_ascii_ = false;
- }
- IncrementCharacterCount(length);
- }
-
-
- MaybeHandle<String> ToString() {
- Isolate* isolate = heap_->isolate();
- if (array_builder_.length() == 0) {
- return isolate->factory()->empty_string();
- }
-
- Handle<String> joined_string;
- if (is_ascii_) {
- Handle<SeqOneByteString> seq;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, seq,
- isolate->factory()->NewRawOneByteString(character_count_),
- String);
-
- DisallowHeapAllocation no_gc;
- uint8_t* char_buffer = seq->GetChars();
- StringBuilderConcatHelper(*subject_,
- char_buffer,
- *array_builder_.array(),
- array_builder_.length());
- joined_string = Handle<String>::cast(seq);
- } else {
- // Non-ASCII.
- Handle<SeqTwoByteString> seq;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, seq,
- isolate->factory()->NewRawTwoByteString(character_count_),
- String);
-
- DisallowHeapAllocation no_gc;
- uc16* char_buffer = seq->GetChars();
- StringBuilderConcatHelper(*subject_,
- char_buffer,
- *array_builder_.array(),
- array_builder_.length());
- joined_string = Handle<String>::cast(seq);
- }
- return joined_string;
- }
-
-
- void IncrementCharacterCount(int by) {
- if (character_count_ > String::kMaxLength - by) {
- STATIC_ASSERT(String::kMaxLength < kMaxInt);
- character_count_ = kMaxInt;
- } else {
- character_count_ += by;
- }
- }
-
- private:
- void AddElement(Object* element) {
- DCHECK(element->IsSmi() || element->IsString());
- DCHECK(array_builder_.capacity() > array_builder_.length());
- array_builder_.Add(element);
- }
-
- Heap* heap_;
- FixedArrayBuilder array_builder_;
- Handle<String> subject_;
- int character_count_;
- bool is_ascii_;
-};
-
-
-class CompiledReplacement {
- public:
- explicit CompiledReplacement(Zone* zone)
- : parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
-
- // Return whether the replacement is simple.
- bool Compile(Handle<String> replacement,
- int capture_count,
- int subject_length);
-
- // Use Apply only if Compile returned false.
- void Apply(ReplacementStringBuilder* builder,
- int match_from,
- int match_to,
- int32_t* match);
-
- // Number of distinct parts of the replacement pattern.
- int parts() {
- return parts_.length();
- }
-
- Zone* zone() const { return zone_; }
-
- private:
- enum PartType {
- SUBJECT_PREFIX = 1,
- SUBJECT_SUFFIX,
- SUBJECT_CAPTURE,
- REPLACEMENT_SUBSTRING,
- REPLACEMENT_STRING,
-
- NUMBER_OF_PART_TYPES
- };
-
- struct ReplacementPart {
- static inline ReplacementPart SubjectMatch() {
- return ReplacementPart(SUBJECT_CAPTURE, 0);
- }
- static inline ReplacementPart SubjectCapture(int capture_index) {
- return ReplacementPart(SUBJECT_CAPTURE, capture_index);
- }
- static inline ReplacementPart SubjectPrefix() {
- return ReplacementPart(SUBJECT_PREFIX, 0);
- }
- static inline ReplacementPart SubjectSuffix(int subject_length) {
- return ReplacementPart(SUBJECT_SUFFIX, subject_length);
- }
- static inline ReplacementPart ReplacementString() {
- return ReplacementPart(REPLACEMENT_STRING, 0);
- }
- static inline ReplacementPart ReplacementSubString(int from, int to) {
- DCHECK(from >= 0);
- DCHECK(to > from);
- return ReplacementPart(-from, to);
- }
-
- // If tag <= 0 then it is the negation of a start index of a substring of
- // the replacement pattern, otherwise it's a value from PartType.
- ReplacementPart(int tag, int data)
- : tag(tag), data(data) {
- // Must be non-positive or a PartType value.
- DCHECK(tag < NUMBER_OF_PART_TYPES);
- }
- // Either a value of PartType or a non-positive number that is
- // the negation of an index into the replacement string.
- int tag;
- // The data value's interpretation depends on the value of tag:
- // tag == SUBJECT_PREFIX ||
- // tag == SUBJECT_SUFFIX: data is unused.
- // tag == SUBJECT_CAPTURE: data is the number of the capture.
- // tag == REPLACEMENT_SUBSTRING ||
- // tag == REPLACEMENT_STRING: data is index into array of substrings
- // of the replacement string.
- // tag <= 0: Temporary representation of the substring of the replacement
- // string ranging over -tag .. data.
- // Is replaced by REPLACEMENT_{SUB,}STRING when we create the
- // substring objects.
- int data;
- };
-
- template<typename Char>
- bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
- Vector<Char> characters,
- int capture_count,
- int subject_length,
- Zone* zone) {
- int length = characters.length();
- int last = 0;
- for (int i = 0; i < length; i++) {
- Char c = characters[i];
- if (c == '$') {
- int next_index = i + 1;
- if (next_index == length) { // No next character!
- break;
- }
- Char c2 = characters[next_index];
- switch (c2) {
- case '$':
- if (i > last) {
- // There is a substring before. Include the first "$".
- parts->Add(ReplacementPart::ReplacementSubString(last, next_index),
- zone);
- last = next_index + 1; // Continue after the second "$".
- } else {
- // Let the next substring start with the second "$".
- last = next_index;
- }
- i = next_index;
- break;
- case '`':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- parts->Add(ReplacementPart::SubjectPrefix(), zone);
- i = next_index;
- last = i + 1;
- break;
- case '\'':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- parts->Add(ReplacementPart::SubjectSuffix(subject_length), zone);
- i = next_index;
- last = i + 1;
- break;
- case '&':
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- parts->Add(ReplacementPart::SubjectMatch(), zone);
- i = next_index;
- last = i + 1;
- break;
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9': {
- int capture_ref = c2 - '0';
- if (capture_ref > capture_count) {
- i = next_index;
- continue;
- }
- int second_digit_index = next_index + 1;
- if (second_digit_index < length) {
- // Peek ahead to see if we have two digits.
- Char c3 = characters[second_digit_index];
- if ('0' <= c3 && c3 <= '9') { // Double digits.
- int double_digit_ref = capture_ref * 10 + c3 - '0';
- if (double_digit_ref <= capture_count) {
- next_index = second_digit_index;
- capture_ref = double_digit_ref;
- }
- }
- }
- if (capture_ref > 0) {
- if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
- }
- DCHECK(capture_ref <= capture_count);
- parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
- last = next_index + 1;
- }
- i = next_index;
- break;
- }
- default:
- i = next_index;
- break;
- }
- }
- }
- if (length > last) {
- if (last == 0) {
- // Replacement is simple. Do not use Apply to do the replacement.
- return true;
- } else {
- parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
- }
- }
- return false;
- }
-
- ZoneList<ReplacementPart> parts_;
- ZoneList<Handle<String> > replacement_substrings_;
- Zone* zone_;
-};
-
-
-bool CompiledReplacement::Compile(Handle<String> replacement,
- int capture_count,
- int subject_length) {
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent content = replacement->GetFlatContent();
- DCHECK(content.IsFlat());
- bool simple = false;
- if (content.IsAscii()) {
- simple = ParseReplacementPattern(&parts_,
- content.ToOneByteVector(),
- capture_count,
- subject_length,
- zone());
- } else {
- DCHECK(content.IsTwoByte());
- simple = ParseReplacementPattern(&parts_,
- content.ToUC16Vector(),
- capture_count,
- subject_length,
- zone());
- }
- if (simple) return true;
- }
-
- Isolate* isolate = replacement->GetIsolate();
- // Find substrings of replacement string and create them as String objects.
- int substring_index = 0;
- for (int i = 0, n = parts_.length(); i < n; i++) {
- int tag = parts_[i].tag;
- if (tag <= 0) { // A replacement string slice.
- int from = -tag;
- int to = parts_[i].data;
- replacement_substrings_.Add(
- isolate->factory()->NewSubString(replacement, from, to), zone());
- parts_[i].tag = REPLACEMENT_SUBSTRING;
- parts_[i].data = substring_index;
- substring_index++;
- } else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.Add(replacement, zone());
- parts_[i].data = substring_index;
- substring_index++;
- }
- }
- return false;
-}
-
-
-void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
- int match_from,
- int match_to,
- int32_t* match) {
- DCHECK_LT(0, parts_.length());
- for (int i = 0, n = parts_.length(); i < n; i++) {
- ReplacementPart part = parts_[i];
- switch (part.tag) {
- case SUBJECT_PREFIX:
- if (match_from > 0) builder->AddSubjectSlice(0, match_from);
- break;
- case SUBJECT_SUFFIX: {
- int subject_length = part.data;
- if (match_to < subject_length) {
- builder->AddSubjectSlice(match_to, subject_length);
- }
- break;
- }
- case SUBJECT_CAPTURE: {
- int capture = part.data;
- int from = match[capture * 2];
- int to = match[capture * 2 + 1];
- if (from >= 0 && to > from) {
- builder->AddSubjectSlice(from, to);
- }
- break;
- }
- case REPLACEMENT_SUBSTRING:
- case REPLACEMENT_STRING:
- builder->AddString(replacement_substrings_[part.data]);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-void FindAsciiStringIndices(Vector<const uint8_t> subject,
- char pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- DCHECK(limit > 0);
- // Collect indices of pattern in subject using memchr.
- // Stop after finding at most limit values.
- const uint8_t* subject_start = subject.start();
- const uint8_t* subject_end = subject_start + subject.length();
- const uint8_t* pos = subject_start;
- while (limit > 0) {
- pos = reinterpret_cast<const uint8_t*>(
- memchr(pos, pattern, subject_end - pos));
- if (pos == NULL) return;
- indices->Add(static_cast<int>(pos - subject_start), zone);
- pos++;
- limit--;
- }
-}
-
-
-void FindTwoByteStringIndices(const Vector<const uc16> subject,
- uc16 pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- DCHECK(limit > 0);
- const uc16* subject_start = subject.start();
- const uc16* subject_end = subject_start + subject.length();
- for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
- if (*pos == pattern) {
- indices->Add(static_cast<int>(pos - subject_start), zone);
- limit--;
- }
- }
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-void FindStringIndices(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- DCHECK(limit > 0);
- // Collect indices of pattern in subject.
- // Stop after finding at most limit values.
- int pattern_length = pattern.length();
- int index = 0;
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- while (limit > 0) {
- index = search.Search(subject, index);
- if (index < 0) return;
- indices->Add(index, zone);
- index += pattern_length;
- limit--;
- }
-}
-
-
-void FindStringIndicesDispatch(Isolate* isolate,
- String* subject,
- String* pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent subject_content = subject->GetFlatContent();
- String::FlatContent pattern_content = pattern->GetFlatContent();
- DCHECK(subject_content.IsFlat());
- DCHECK(pattern_content.IsFlat());
- if (subject_content.IsAscii()) {
- Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
- if (pattern_content.IsAscii()) {
- Vector<const uint8_t> pattern_vector =
- pattern_content.ToOneByteVector();
- if (pattern_vector.length() == 1) {
- FindAsciiStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- indices,
- limit,
- zone);
- }
- } else {
- Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (pattern_content.IsAscii()) {
- Vector<const uint8_t> pattern_vector =
- pattern_content.ToOneByteVector();
- if (pattern_vector.length() == 1) {
- FindTwoByteStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
- } else {
- Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
- if (pattern_vector.length() == 1) {
- FindTwoByteStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
- }
- }
- }
-}
-
-
-template<typename ResultSeqString>
-MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> pattern_regexp,
- Handle<String> replacement,
- Handle<JSArray> last_match_info) {
- DCHECK(subject->IsFlat());
- DCHECK(replacement->IsFlat());
-
- ZoneScope zone_scope(isolate->runtime_zone());
- ZoneList<int> indices(8, zone_scope.zone());
- DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
- String* pattern =
- String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
- int subject_len = subject->length();
- int pattern_len = pattern->length();
- int replacement_len = replacement->length();
-
- FindStringIndicesDispatch(
- isolate, *subject, pattern, &indices, 0xffffffff, zone_scope.zone());
-
- int matches = indices.length();
- if (matches == 0) return *subject;
-
- // Detect integer overflow.
- int64_t result_len_64 =
- (static_cast<int64_t>(replacement_len) -
- static_cast<int64_t>(pattern_len)) *
- static_cast<int64_t>(matches) +
- static_cast<int64_t>(subject_len);
- int result_len;
- if (result_len_64 > static_cast<int64_t>(String::kMaxLength)) {
- STATIC_ASSERT(String::kMaxLength < kMaxInt);
- result_len = kMaxInt; // Provoke exception.
- } else {
- result_len = static_cast<int>(result_len_64);
- }
-
- int subject_pos = 0;
- int result_pos = 0;
-
- MaybeHandle<SeqString> maybe_res;
- if (ResultSeqString::kHasAsciiEncoding) {
- maybe_res = isolate->factory()->NewRawOneByteString(result_len);
- } else {
- maybe_res = isolate->factory()->NewRawTwoByteString(result_len);
- }
- Handle<SeqString> untyped_res;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, untyped_res, maybe_res);
- Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(untyped_res);
-
- for (int i = 0; i < matches; i++) {
- // Copy non-matched subject content.
- if (subject_pos < indices.at(i)) {
- String::WriteToFlat(*subject,
- result->GetChars() + result_pos,
- subject_pos,
- indices.at(i));
- result_pos += indices.at(i) - subject_pos;
- }
-
- // Replace match.
- if (replacement_len > 0) {
- String::WriteToFlat(*replacement,
- result->GetChars() + result_pos,
- 0,
- replacement_len);
- result_pos += replacement_len;
- }
-
- subject_pos = indices.at(i) + pattern_len;
- }
- // Add remaining subject content at the end.
- if (subject_pos < subject_len) {
- String::WriteToFlat(*subject,
- result->GetChars() + result_pos,
- subject_pos,
- subject_len);
- }
-
- int32_t match_indices[] = { indices.at(matches - 1),
- indices.at(matches - 1) + pattern_len };
- RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
-
- return *result;
-}
-
-
-MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<String> replacement,
- Handle<JSArray> last_match_info) {
- DCHECK(subject->IsFlat());
- DCHECK(replacement->IsFlat());
-
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- // CompiledReplacement uses zone allocation.
- ZoneScope zone_scope(isolate->runtime_zone());
- CompiledReplacement compiled_replacement(zone_scope.zone());
- bool simple_replace = compiled_replacement.Compile(replacement,
- capture_count,
- subject_length);
-
- // Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
- if (subject->HasOnlyOneByteChars() &&
- replacement->HasOnlyOneByteChars()) {
- return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
- isolate, subject, regexp, replacement, last_match_info);
- } else {
- return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
- isolate, subject, regexp, replacement, last_match_info);
- }
- }
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) {
- if (global_cache.HasException()) return isolate->heap()->exception();
- return *subject;
- }
-
- // Guessing the number of parts that the final result string is built
- // from. Global regexps can match any number of times, so we guess
- // conservatively.
- int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
- ReplacementStringBuilder builder(isolate->heap(),
- subject,
- expected_parts);
-
- // Number of parts added by compiled replacement plus preceeding
- // string and possibly suffix after last match. It is possible for
- // all components to use two elements when encoded as two smis.
- const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
-
- int prev = 0;
-
- do {
- builder.EnsureCapacity(parts_added_per_loop);
-
- int start = current_match[0];
- int end = current_match[1];
-
- if (prev < start) {
- builder.AddSubjectSlice(prev, start);
- }
-
- if (simple_replace) {
- builder.AddString(replacement);
- } else {
- compiled_replacement.Apply(&builder,
- start,
- end,
- current_match);
- }
- prev = end;
-
- current_match = global_cache.FetchNext();
- } while (current_match != NULL);
-
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- if (prev < subject_length) {
- builder.EnsureCapacity(2);
- builder.AddSubjectSlice(prev, subject_length);
- }
-
- RegExpImpl::SetLastMatchInfo(last_match_info,
- subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
-
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.ToString());
- return *result;
-}
-
-
-template <typename ResultSeqString>
-MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_info) {
- DCHECK(subject->IsFlat());
-
- // Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string = isolate->factory()->empty_string();
- if (subject->IsOneByteRepresentation()) {
- return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
- isolate, subject, regexp, empty_string, last_match_info);
- } else {
- return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
- isolate, subject, regexp, empty_string, last_match_info);
- }
- }
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) {
- if (global_cache.HasException()) return isolate->heap()->exception();
- return *subject;
- }
-
- int start = current_match[0];
- int end = current_match[1];
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- int new_length = subject_length - (end - start);
- if (new_length == 0) return isolate->heap()->empty_string();
-
- Handle<ResultSeqString> answer;
- if (ResultSeqString::kHasAsciiEncoding) {
- answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(new_length).ToHandleChecked());
- } else {
- answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(new_length).ToHandleChecked());
- }
-
- int prev = 0;
- int position = 0;
-
- do {
- start = current_match[0];
- end = current_match[1];
- if (prev < start) {
- // Add substring subject[prev;start] to answer string.
- String::WriteToFlat(*subject, answer->GetChars() + position, prev, start);
- position += start - prev;
- }
- prev = end;
-
- current_match = global_cache.FetchNext();
- } while (current_match != NULL);
-
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- RegExpImpl::SetLastMatchInfo(last_match_info,
- subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
-
- if (prev < subject_length) {
- // Add substring subject[prev;length] to answer string.
- String::WriteToFlat(
- *subject, answer->GetChars() + position, prev, subject_length);
- position += subject_length - prev;
- }
-
- if (position == 0) return isolate->heap()->empty_string();
-
- // Shorten string and fill
- int string_size = ResultSeqString::SizeFor(position);
- int allocated_string_size = ResultSeqString::SizeFor(new_length);
- int delta = allocated_string_size - string_size;
-
- answer->set_length(position);
- if (delta == 0) return *answer;
-
- Address end_of_string = answer->address() + string_size;
- Heap* heap = isolate->heap();
-
- // The trimming is performed on a newly allocated object, which is on a
- // fresly allocated page or on an already swept page. Hence, the sweeper
- // thread can not get confused with the filler creation. No synchronization
- // needed.
- heap->CreateFillerObjectAt(end_of_string, delta);
- heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR);
- return *answer;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
-
- RUNTIME_ASSERT(regexp->GetFlags().is_global());
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
-
- subject = String::Flatten(subject);
-
- if (replacement->length() == 0) {
- if (subject->HasOnlyOneByteChars()) {
- return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
- isolate, subject, regexp, last_match_info);
- } else {
- return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
- isolate, subject, regexp, last_match_info);
- }
- }
-
- replacement = String::Flatten(replacement);
-
- return StringReplaceGlobalRegExpWithString(
- isolate, subject, regexp, replacement, last_match_info);
-}
-
-
-// This may return an empty MaybeHandle if an exception is thrown or
-// we abort due to reaching the recursion limit.
-MaybeHandle<String> StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit) {
- StackLimitCheck stackLimitCheck(isolate);
- if (stackLimitCheck.HasOverflowed() || (recursion_limit == 0)) {
- return MaybeHandle<String>();
- }
- recursion_limit--;
- if (subject->IsConsString()) {
- ConsString* cons = ConsString::cast(*subject);
- Handle<String> first = Handle<String>(cons->first());
- Handle<String> second = Handle<String>(cons->second());
- Handle<String> new_first;
- if (!StringReplaceOneCharWithString(
- isolate, first, search, replace, found, recursion_limit)
- .ToHandle(&new_first)) {
- return MaybeHandle<String>();
- }
- if (*found) return isolate->factory()->NewConsString(new_first, second);
-
- Handle<String> new_second;
- if (!StringReplaceOneCharWithString(
- isolate, second, search, replace, found, recursion_limit)
- .ToHandle(&new_second)) {
- return MaybeHandle<String>();
- }
- if (*found) return isolate->factory()->NewConsString(first, new_second);
-
- return subject;
- } else {
- int index = Runtime::StringMatch(isolate, subject, search, 0);
- if (index == -1) return subject;
- *found = true;
- Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
- Handle<String> cons1;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, cons1,
- isolate->factory()->NewConsString(first, replace),
- String);
- Handle<String> second =
- isolate->factory()->NewSubString(subject, index + 1, subject->length());
- return isolate->factory()->NewConsString(cons1, second);
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
-
- // If the cons string tree is too deep, we simply abort the recursion and
- // retry with a flattened subject string.
- const int kRecursionLimit = 0x1000;
- bool found = false;
- Handle<String> result;
- if (StringReplaceOneCharWithString(
- isolate, subject, search, replace, &found, kRecursionLimit)
- .ToHandle(&result)) {
- return *result;
- }
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
-
- subject = String::Flatten(subject);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- StringReplaceOneCharWithString(
- isolate, subject, search, replace, &found, kRecursionLimit));
- return *result;
-}
-
-
-// Perform string match of pattern on subject, starting at start index.
-// Caller must ensure that 0 <= start_index <= sub->length(),
-// and should check that pat->length() + start_index <= sub->length().
-int Runtime::StringMatch(Isolate* isolate,
- Handle<String> sub,
- Handle<String> pat,
- int start_index) {
- DCHECK(0 <= start_index);
- DCHECK(start_index <= sub->length());
-
- int pattern_length = pat->length();
- if (pattern_length == 0) return start_index;
-
- int subject_length = sub->length();
- if (start_index + pattern_length > subject_length) return -1;
-
- sub = String::Flatten(sub);
- pat = String::Flatten(pat);
-
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
- // Extract flattened substrings of cons strings before determining asciiness.
- String::FlatContent seq_sub = sub->GetFlatContent();
- String::FlatContent seq_pat = pat->GetFlatContent();
-
- // dispatch on type of strings
- if (seq_pat.IsAscii()) {
- Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
- if (seq_sub.IsAscii()) {
- return SearchString(isolate,
- seq_sub.ToOneByteVector(),
- pat_vector,
- start_index);
- }
- return SearchString(isolate,
- seq_sub.ToUC16Vector(),
- pat_vector,
- start_index);
- }
- Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
- if (seq_sub.IsAscii()) {
- return SearchString(isolate,
- seq_sub.ToOneByteVector(),
- pat_vector,
- start_index);
- }
- return SearchString(isolate,
- seq_sub.ToUC16Vector(),
- pat_vector,
- start_index);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringIndexOf) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
-
- uint32_t start_index;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position = Runtime::StringMatch(isolate, sub, pat, start_index);
- return Smi::FromInt(position);
-}
-
-
-template <typename schar, typename pchar>
-static int StringMatchBackwards(Vector<const schar> subject,
- Vector<const pchar> pattern,
- int idx) {
- int pattern_length = pattern.length();
- DCHECK(pattern_length >= 1);
- DCHECK(idx + pattern_length <= subject.length());
-
- if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
- for (int i = 0; i < pattern_length; i++) {
- uc16 c = pattern[i];
- if (c > String::kMaxOneByteCharCode) {
- return -1;
- }
- }
- }
-
- pchar pattern_first_char = pattern[0];
- for (int i = idx; i >= 0; i--) {
- if (subject[i] != pattern_first_char) continue;
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i+j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
- }
- }
- return -1;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
-
- uint32_t start_index;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- uint32_t pat_length = pat->length();
- uint32_t sub_length = sub->length();
-
- if (start_index + pat_length > sub_length) {
- start_index = sub_length - pat_length;
- }
-
- if (pat_length == 0) {
- return Smi::FromInt(start_index);
- }
-
- sub = String::Flatten(sub);
- pat = String::Flatten(pat);
-
- int position = -1;
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
-
- String::FlatContent sub_content = sub->GetFlatContent();
- String::FlatContent pat_content = pat->GetFlatContent();
-
- if (pat_content.IsAscii()) {
- Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
- if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToOneByteVector(),
- pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub_content.ToUC16Vector(),
- pat_vector,
- start_index);
- }
- } else {
- Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
- if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToOneByteVector(),
- pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub_content.ToUC16Vector(),
- pat_vector,
- start_index);
- }
- }
-
- return Smi::FromInt(position);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
-
- if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal.
- int str1_length = str1->length();
- int str2_length = str2->length();
-
- // Decide trivial cases without flattening.
- if (str1_length == 0) {
- if (str2_length == 0) return Smi::FromInt(0); // Equal.
- return Smi::FromInt(-str2_length);
- } else {
- if (str2_length == 0) return Smi::FromInt(str1_length);
- }
-
- int end = str1_length < str2_length ? str1_length : str2_length;
-
- // No need to flatten if we are going to find the answer on the first
- // character. At this point we know there is at least one character
- // in each string, due to the trivial case handling above.
- int d = str1->Get(0) - str2->Get(0);
- if (d != 0) return Smi::FromInt(d);
-
- str1 = String::Flatten(str1);
- str2 = String::Flatten(str2);
-
- DisallowHeapAllocation no_gc;
- String::FlatContent flat1 = str1->GetFlatContent();
- String::FlatContent flat2 = str2->GetFlatContent();
-
- for (int i = 0; i < end; i++) {
- if (flat1.Get(i) != flat2.Get(i)) {
- return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
- }
- }
-
- return Smi::FromInt(str1_length - str2_length);
-}
-
-
-RUNTIME_FUNCTION(Runtime_SubString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- int start, end;
- // We have a fast integer-only case here to avoid a conversion to double in
- // the common case where from and to are Smis.
- if (args[1]->IsSmi() && args[2]->IsSmi()) {
- CONVERT_SMI_ARG_CHECKED(from_number, 1);
- CONVERT_SMI_ARG_CHECKED(to_number, 2);
- start = from_number;
- end = to_number;
- } else {
- CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
- CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
- start = FastD2IChecked(from_number);
- end = FastD2IChecked(to_number);
- }
- RUNTIME_ASSERT(end >= start);
- RUNTIME_ASSERT(start >= 0);
- RUNTIME_ASSERT(end <= string->length());
- isolate->counters()->sub_string_runtime()->Increment();
-
- return *isolate->factory()->NewSubString(string, start, end);
-}
-
-
-RUNTIME_FUNCTION(Runtime_InternalizeString) {
- HandleScope handles(isolate);
- RUNTIME_ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- return *isolate->factory()->InternalizeString(string);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringMatch) {
- HandleScope handles(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
-
- RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- int capture_count = regexp->CaptureCount();
-
- ZoneScope zone_scope(isolate->runtime_zone());
- ZoneList<int> offsets(8, zone_scope.zone());
-
- while (true) {
- int32_t* match = global_cache.FetchNext();
- if (match == NULL) break;
- offsets.Add(match[0], zone_scope.zone()); // start
- offsets.Add(match[1], zone_scope.zone()); // end
- }
-
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- if (offsets.length() == 0) {
- // Not a single match.
- return isolate->heap()->null_value();
- }
-
- RegExpImpl::SetLastMatchInfo(regexp_info,
- subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
-
- int matches = offsets.length() / 2;
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- Handle<String> substring =
- isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
- elements->set(0, *substring);
- for (int i = 1; i < matches; i++) {
- HandleScope temp_scope(isolate);
- int from = offsets.at(i * 2);
- int to = offsets.at(i * 2 + 1);
- Handle<String> substring =
- isolate->factory()->NewProperSubString(subject, from, to);
- elements->set(i, *substring);
- }
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(matches));
- return *result;
-}
-
-
-// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
-// separate last match info. See comment on that function.
-template<bool has_capture>
-static Object* SearchRegExpMultiple(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
- Handle<JSArray> result_array) {
- DCHECK(subject->IsFlat());
- DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
-
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
-
- static const int kMinLengthToCache = 0x1000;
-
- if (subject_length > kMinLengthToCache) {
- Handle<Object> cached_answer(RegExpResultsCache::Lookup(
- isolate->heap(),
- *subject,
- regexp->data(),
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES), isolate);
- if (*cached_answer != Smi::FromInt(0)) {
- Handle<FixedArray> cached_fixed_array =
- Handle<FixedArray>(FixedArray::cast(*cached_answer));
- // The cache FixedArray is a COW-array and can therefore be reused.
- JSArray::SetContent(result_array, cached_fixed_array);
- // The actual length of the result array is stored in the last element of
- // the backing store (the backing FixedArray may have a larger capacity).
- Object* cached_fixed_array_last_element =
- cached_fixed_array->get(cached_fixed_array->length() - 1);
- Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
- result_array->set_length(js_array_length);
- RegExpImpl::SetLastMatchInfo(
- last_match_array, subject, capture_count, NULL);
- return *result_array;
- }
- }
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- // Ensured in Runtime_RegExpExecMultiple.
- DCHECK(result_array->HasFastObjectElements());
- Handle<FixedArray> result_elements(
- FixedArray::cast(result_array->elements()));
- if (result_elements->length() < 16) {
- result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
- }
-
- FixedArrayBuilder builder(result_elements);
-
- // Position to search from.
- int match_start = -1;
- int match_end = 0;
- bool first = true;
-
- // Two smis before and after the match, for very long strings.
- static const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
- while (true) {
- int32_t* current_match = global_cache.FetchNext();
- if (current_match == NULL) break;
- match_start = current_match[0];
- builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(&builder,
- match_end,
- match_start);
- }
- match_end = current_match[1];
- {
- // Avoid accumulating new handles inside loop.
- HandleScope temp_scope(isolate);
- Handle<String> match;
- if (!first) {
- match = isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end);
- } else {
- match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
- first = false;
- }
-
- if (has_capture) {
- // Arguments array to replace function is match, captures, index and
- // subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements =
- isolate->factory()->NewFixedArray(3 + capture_count);
-
- elements->set(0, *match);
- for (int i = 1; i <= capture_count; i++) {
- int start = current_match[i * 2];
- if (start >= 0) {
- int end = current_match[i * 2 + 1];
- DCHECK(start <= end);
- Handle<String> substring =
- isolate->factory()->NewSubString(subject, start, end);
- elements->set(i, *substring);
- } else {
- DCHECK(current_match[i * 2 + 1] < 0);
- elements->set(i, isolate->heap()->undefined_value());
- }
- }
- elements->set(capture_count + 1, Smi::FromInt(match_start));
- elements->set(capture_count + 2, *subject);
- builder.Add(*isolate->factory()->NewJSArrayWithElements(elements));
- } else {
- builder.Add(*match);
- }
- }
- }
-
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- if (match_start >= 0) {
- // Finished matching, with at least one match.
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(&builder,
- match_end,
- subject_length);
- }
-
- RegExpImpl::SetLastMatchInfo(
- last_match_array, subject, capture_count, NULL);
-
- if (subject_length > kMinLengthToCache) {
- // Store the length of the result array into the last element of the
- // backing FixedArray.
- builder.EnsureCapacity(1);
- Handle<FixedArray> fixed_array = builder.array();
- fixed_array->set(fixed_array->length() - 1,
- Smi::FromInt(builder.length()));
- // Cache the result and turn the FixedArray into a COW array.
- RegExpResultsCache::Enter(isolate,
- subject,
- handle(regexp->data(), isolate),
- fixed_array,
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
- }
- return *builder.ToJSArray(result_array);
- } else {
- return isolate->heap()->null_value(); // No matches at all.
- }
-}
-
-
-// This is only called for StringReplaceGlobalRegExpWithFunction. This sets
-// lastMatchInfoOverride to maintain the last match info, so we don't need to
-// set any other last match array info.
-RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
- HandleScope handles(isolate);
- DCHECK(args.length() == 4);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
- RUNTIME_ASSERT(result_array->HasFastObjectElements());
-
- subject = String::Flatten(subject);
- RUNTIME_ASSERT(regexp->GetFlags().is_global());
-
- if (regexp->CaptureCount() == 0) {
- return SearchRegExpMultiple<false>(
- isolate, subject, regexp, last_match_info, result_array);
- } else {
- return SearchRegExpMultiple<true>(
- isolate, subject, regexp, last_match_info, result_array);
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToRadixString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(radix, 1);
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
-
- // Fast case where the result is a one character string.
- if (args[0]->IsSmi()) {
- int value = args.smi_at(0);
- if (value >= 0 && value < radix) {
- // Character array used for conversion.
- static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return *isolate->factory()->
- LookupSingleCharacterStringFromCode(kCharTable[value]);
- }
- }
-
- // Slow case.
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (std::isnan(value)) {
- return isolate->heap()->nan_string();
- }
- if (std::isinf(value)) {
- if (value < 0) {
- return isolate->heap()->minus_infinity_string();
- }
- return isolate->heap()->infinity_string();
- }
- char* str = DoubleToRadixCString(value, radix);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToFixed) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- // See DoubleToFixedCString for these constants:
- RUNTIME_ASSERT(f >= 0 && f <= 20);
- RUNTIME_ASSERT(!Double(value).IsSpecial());
- char* str = DoubleToFixedCString(value, f);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToExponential) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= -1 && f <= 20);
- RUNTIME_ASSERT(!Double(value).IsSpecial());
- char* str = DoubleToExponentialCString(value, f);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToPrecision) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= 1 && f <= 21);
- RUNTIME_ASSERT(!Double(value).IsSpecial());
- char* str = DoubleToPrecisionCString(value, f);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsValidSmi) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
- return isolate->heap()->ToBoolean(Smi::IsValid(number));
-}
-
-
// Returns a single character string where first character equals
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
@@ -4760,8 +1881,8 @@ static MaybeHandle<Name> ToName(Isolate* isolate, Handle<Object> key) {
return Handle<Name>::cast(key);
} else {
Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, converted, Execution::ToString(isolate, key), Name);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
+ Execution::ToString(isolate, key), Name);
return Handle<Name>::cast(converted);
}
}
@@ -4792,10 +1913,10 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key) {
if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = { key, object };
- return isolate->Throw<Object>(
- isolate->factory()->NewTypeError("non_object_property_load",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = {key, object};
+ THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_load",
+ HandleVector(args, 2)),
+ Object);
}
// Check if the given key is an array index.
@@ -4826,8 +1947,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::GetObjectProperty(isolate, object, key));
+ isolate, result, Runtime::GetObjectProperty(isolate, object, key));
return *result;
}
@@ -4853,8 +1973,7 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
// for objects that require access checks.
if (receiver_obj->IsJSObject()) {
if (!receiver_obj->IsJSGlobalProxy() &&
- !receiver_obj->IsAccessCheckNeeded() &&
- key_obj->IsName()) {
+ !receiver_obj->IsAccessCheckNeeded() && key_obj->IsName()) {
DisallowHeapAllocation no_allocation;
Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
Handle<Name> key = Handle<Name>::cast(key_obj);
@@ -4870,18 +1989,18 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
}
// Lookup cache miss. Perform lookup and update the cache if
// appropriate.
- LookupResult result(isolate);
- receiver->LookupOwn(key, &result);
- if (result.IsField()) {
- FieldIndex field_index = result.GetFieldIndex();
+ LookupIterator it(receiver, key, LookupIterator::OWN);
+ if (it.state() == LookupIterator::DATA &&
+ it.property_details().type() == FIELD) {
+ FieldIndex field_index = it.GetFieldIndex();
// Do not track double fields in the keyed lookup cache. Reading
// double values requires boxing.
- if (!result.representation().IsDouble()) {
+ if (!it.representation().IsDouble()) {
keyed_lookup_cache->Update(receiver_map, key,
- field_index.GetKeyedLookupCacheIndex());
+ field_index.GetKeyedLookupCacheIndex());
}
AllowHeapAllocation allow_allocation;
- return *JSObject::FastPropertyAt(receiver, result.representation(),
+ return *JSObject::FastPropertyAt(receiver, it.representation(),
field_index);
}
} else {
@@ -4948,7 +2067,7 @@ static bool IsValidAccessor(Handle<Object> obj) {
// Transform getter or setter into something DefineAccessor can handle.
static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
Handle<Object> component) {
- if (component->IsUndefined()) return isolate->factory()->null_value();
+ if (component->IsUndefined()) return isolate->factory()->undefined_value();
Handle<FunctionTemplateInfo> info =
Handle<FunctionTemplateInfo>::cast(component);
return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
@@ -5020,40 +2139,31 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- // Check access rights if needed.
- if (js_object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) {
- return isolate->heap()->undefined_value();
+ LookupIterator it(js_object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.IsFound() && it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) {
+ return isolate->heap()->undefined_value();
+ }
+ it.Next();
}
- LookupResult lookup(isolate);
- js_object->LookupOwnRealNamedProperty(name, &lookup);
-
// Take special care when attributes are different and there is already
- // a property. For simplicity we normalize the property which enables us
- // to not worry about changing the instance_descriptor and creating a new
- // map.
- if (lookup.IsFound() &&
- (attr != lookup.GetAttributes() || lookup.IsPropertyCallbacks())) {
+ // a property.
+ if (it.state() == LookupIterator::ACCESSOR) {
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
JSObject::SetOwnPropertyIgnoreAttributes(
- js_object, name, obj_value, attr,
- JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
- JSReceiver::MAY_BE_STORE_FROM_KEYED,
- JSObject::DONT_FORCE_FIELD));
+ js_object, name, obj_value, attr, JSObject::DONT_FORCE_FIELD));
return *result;
}
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::DefineObjectProperty(
- js_object, name, obj_value, attr,
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED));
+ Runtime::DefineObjectProperty(js_object, name, obj_value, attr));
return *result;
}
@@ -5074,11 +2184,10 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> value,
StrictMode strict_mode) {
if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = { key, object };
- Handle<Object> error =
- isolate->factory()->NewTypeError("non_object_property_store",
- HandleVector(args, 2));
- return isolate->Throw<Object>(error);
+ Handle<Object> args[2] = {key, object};
+ THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_store",
+ HandleVector(args, 2)),
+ Object);
}
if (object->IsJSProxy()) {
@@ -5086,8 +2195,8 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
if (key->IsSymbol()) {
name_object = key;
} else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, name_object, Execution::ToString(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name_object,
+ Execution::ToString(isolate, key), Object);
}
Handle<Name> name = Handle<Name>::cast(name_object);
return Object::SetProperty(Handle<JSProxy>::cast(object), name, value,
@@ -5116,8 +2225,8 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
if (js_object->HasExternalArrayElements() ||
js_object->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Execution::ToNumber(isolate, value), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ Execution::ToNumber(isolate, value), Object);
}
}
@@ -5150,8 +2259,8 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
// Call-back into JavaScript to convert the key to a string.
Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, converted, Execution::ToString(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
+ Execution::ToString(isolate, key), Object);
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
@@ -5165,12 +2274,10 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
-MaybeHandle<Object> Runtime::DefineObjectProperty(
- Handle<JSObject> js_object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- JSReceiver::StoreFromKeyed store_from_keyed) {
+MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr) {
Isolate* isolate = js_object->GetIsolate();
// Check if the given key is an array index.
uint32_t index;
@@ -5186,36 +2293,34 @@ MaybeHandle<Object> Runtime::DefineObjectProperty(
return value;
}
- return JSObject::SetElement(js_object, index, value, attr,
- SLOPPY, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY, false,
+ DEFINE_PROPERTY);
}
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr,
- SLOPPY, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY, false,
+ DEFINE_PROPERTY);
} else {
if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
- return JSObject::SetOwnPropertyIgnoreAttributes(
- js_object, name, value, attr, JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
- store_from_keyed);
+ return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
+ attr);
}
}
// Call-back into JavaScript to convert the key to a string.
Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, converted, Execution::ToString(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
+ Execution::ToString(isolate, key), Object);
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr,
- SLOPPY, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY, false,
+ DEFINE_PROPERTY);
} else {
- return JSObject::SetOwnPropertyIgnoreAttributes(
- js_object, name, value, attr, JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
- store_from_keyed);
+ return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
+ attr);
}
}
@@ -5246,8 +2351,8 @@ MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
} else {
// Call-back into JavaScript to convert the key to a string.
Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, converted, Execution::ToString(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
+ Execution::ToString(isolate, key), Object);
name = Handle<String>::cast(converted);
}
@@ -5285,9 +2390,9 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
#ifdef DEBUG
uint32_t index = 0;
DCHECK(!key->ToArrayIndex(&index));
- LookupIterator it(object, key, LookupIterator::CHECK_OWN_REAL);
+ LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.has_value);
+ if (!maybe.has_value) return isolate->heap()->exception();
RUNTIME_ASSERT(!it.IsFound());
#endif
@@ -5317,7 +2422,7 @@ RUNTIME_FUNCTION(Runtime_AddPropertyForTemplate) {
bool duplicate;
if (key->IsName()) {
LookupIterator it(object, Handle<Name>::cast(key),
- LookupIterator::CHECK_OWN_REAL);
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.has_value);
duplicate = it.IsFound();
@@ -5329,10 +2434,10 @@ RUNTIME_FUNCTION(Runtime_AddPropertyForTemplate) {
duplicate = maybe.value;
}
if (duplicate) {
- Handle<Object> args[1] = { key };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "duplicate_template_property", HandleVector(args, 1));
- return isolate->Throw(*error);
+ Handle<Object> args[1] = {key};
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("duplicate_template_property", HandleVector(args, 1)));
}
#endif
@@ -5362,6 +2467,33 @@ RUNTIME_FUNCTION(Runtime_SetProperty) {
}
+// Adds an element to an array.
+// This is used to create an indexed data property into an array.
+RUNTIME_FUNCTION(Runtime_AddElement) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 4);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
+ RUNTIME_ASSERT(
+ (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+ // Compute attributes.
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(unchecked_attributes);
+
+ uint32_t index = 0;
+ key->ToArrayIndex(&index);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::SetElement(object, index, value, attributes,
+ SLOPPY, false, DEFINE_PROPERTY));
+ return *result;
+}
+
+
RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 2);
@@ -5428,11 +2560,10 @@ RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
if (value->IsNumber()) {
DCHECK(IsFastSmiElementsKind(elements_kind));
ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(),
- transitioned_kind)) {
+ boilerplate_object->GetElementsKind(), transitioned_kind)) {
JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
}
JSObject::TransitionElementsKind(object, transitioned_kind);
@@ -5443,8 +2574,8 @@ RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
} else {
if (!IsFastObjectElementsKind(elements_kind)) {
ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
JSObject::TransitionElementsKind(object, transitioned_kind);
ElementsKind boilerplate_elements_kind =
boilerplate_object->GetElementsKind();
@@ -5469,8 +2600,8 @@ RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
}
CONVERT_ARG_CHECKED(Object, callback, 0);
// We do not step into the callback if it's a builtin or not even a function.
- return isolate->heap()->ToBoolean(
- callback->IsJSFunction() && !JSFunction::cast(callback)->IsBuiltin());
+ return isolate->heap()->ToBoolean(callback->IsJSFunction() &&
+ !JSFunction::cast(callback)->IsBuiltin());
}
@@ -5480,13 +2611,22 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
DCHECK(args.length() == 1);
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
+
HandleScope scope(isolate);
- // When leaving the callback, step out has been activated, but not performed
- // if we do not leave the builtin. To be able to step into the callback
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
+ Handle<JSFunction> fun;
+ if (object->IsJSFunction()) {
+ fun = Handle<JSFunction>::cast(object);
+ } else {
+ fun = Handle<JSFunction>(
+ Handle<JSGeneratorObject>::cast(object)->function(), isolate);
+ }
+ // When leaving the function, step out has been activated, but not performed
+ // if we do not leave the builtin. To be able to step into the function
// again, we need to clear the step out at this point.
debug->ClearStepOut();
- debug->FloodWithOneShot(callback);
+ debug->FloodWithOneShot(fun);
return isolate->heap()->undefined_value();
}
@@ -5495,7 +2635,7 @@ RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
DCHECK(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- isolate->debug()->PushPromise(promise);
+ isolate->PushPromise(promise);
return isolate->heap()->undefined_value();
}
@@ -5503,7 +2643,7 @@ RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
DCHECK(args.length() == 0);
SealHandleScope shs(isolate);
- isolate->debug()->PopPromise();
+ isolate->PopPromise();
return isolate->heap()->undefined_value();
}
@@ -5543,11 +2683,11 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty) {
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
JSReceiver::DeleteMode delete_mode = strict_mode == STRICT
- ? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION;
+ ? JSReceiver::STRICT_DELETION
+ : JSReceiver::NORMAL_DELETION;
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSReceiver::DeleteProperty(object, key, delete_mode));
+ isolate, result, JSReceiver::DeleteProperty(object, key, delete_mode));
return *result;
}
@@ -5599,14 +2739,12 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
return isolate->heap()->true_value();
}
Map* map = js_obj->map();
- if (!key_is_array_index &&
- !map->has_named_interceptor() &&
+ if (!key_is_array_index && !map->has_named_interceptor() &&
!HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
return isolate->heap()->false_value();
}
// Slow case.
- return HasOwnPropertyImplementation(isolate,
- Handle<JSObject>(js_obj),
+ return HasOwnPropertyImplementation(isolate, Handle<JSObject>(js_obj),
Handle<Name>(key));
} else if (object->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
@@ -5731,8 +2869,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- obj, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ !isolate->MayNamedAccess(obj, isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(obj, v8::ACCESS_KEYS);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
@@ -5817,8 +2955,7 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
// hidden prototype duplicates.
if (hidden_strings > 0) {
Handle<FixedArray> old_names = names;
- names = isolate->factory()->NewFixedArray(
- names->length() - hidden_strings);
+ names = isolate->factory()->NewFixedArray(names->length() - hidden_strings);
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
@@ -5913,8 +3050,8 @@ RUNTIME_FUNCTION(Runtime_OwnKeys) {
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
+ !isolate->MayNamedAccess(object, isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
@@ -5928,8 +3065,7 @@ RUNTIME_FUNCTION(Runtime_OwnKeys) {
Handle<FixedArray> contents;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, contents,
- JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY));
+ isolate, contents, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY));
// Some fast paths through GetKeysInFixedArrayFor reuse a cached
// property array and since the result is mutable we have to create
@@ -5975,6 +3111,10 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
HandleScope scope(isolate);
if (raw_key->IsSymbol()) {
+ Handle<Symbol> symbol = Handle<Symbol>::cast(raw_key);
+ if (symbol->Equals(isolate->native_context()->iterator_symbol())) {
+ return isolate->native_context()->array_values_iterator();
+ }
// Lookup in the initial Object.prototype object.
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -5986,8 +3126,8 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
// Convert the key to a string.
Handle<Object> converted;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, converted, Execution::ToString(isolate, raw_key));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, converted,
+ Execution::ToString(isolate, raw_key));
Handle<String> key = Handle<String>::cast(converted);
// Try to convert the string key into an array index.
@@ -6011,8 +3151,9 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
if (String::Equals(isolate->factory()->callee_string(), key)) {
JSFunction* function = frame->function();
if (function->shared()->strict_mode() == STRICT) {
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError("strict_arguments_callee",
+ HandleVector<Object>(NULL, 0)));
}
return function;
}
@@ -6117,729 +3258,6 @@ RUNTIME_FUNCTION(Runtime_Booleanize) {
}
-static bool AreDigits(const uint8_t*s, int from, int to) {
- for (int i = from; i < to; i++) {
- if (s[i] < '0' || s[i] > '9') return false;
- }
-
- return true;
-}
-
-
-static int ParseDecimalInteger(const uint8_t*s, int from, int to) {
- DCHECK(to - from < 10); // Overflow is not possible.
- DCHECK(from < to);
- int d = s[from] - '0';
-
- for (int i = from + 1; i < to; i++) {
- d = 10 * d + (s[i] - '0');
- }
-
- return d;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringToNumber) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- subject = String::Flatten(subject);
-
- // Fast case: short integer or some sorts of junk values.
- if (subject->IsSeqOneByteString()) {
- int len = subject->length();
- if (len == 0) return Smi::FromInt(0);
-
- DisallowHeapAllocation no_gc;
- uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
- bool minus = (data[0] == '-');
- int start_pos = (minus ? 1 : 0);
-
- if (start_pos == len) {
- return isolate->heap()->nan_value();
- } else if (data[start_pos] > '9') {
- // Fast check for a junk value. A valid string may start from a
- // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
- // or the 'I' character ('Infinity'). All of that have codes not greater
- // than '9' except 'I' and &nbsp;.
- if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
- return isolate->heap()->nan_value();
- }
- } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
- // The maximal/minimal smi has 10 digits. If the string has less digits
- // we know it will fit into the smi-data type.
- int d = ParseDecimalInteger(data, start_pos, len);
- if (minus) {
- if (d == 0) return isolate->heap()->minus_zero_value();
- d = -d;
- } else if (!subject->HasHashCode() &&
- len <= String::kMaxArrayIndexSize &&
- (len == 1 || data[0] != '0')) {
- // String hash is not calculated yet but all the data are present.
- // Update the hash field to speed up sequential convertions.
- uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
-#ifdef DEBUG
- subject->Hash(); // Force hash calculation.
- DCHECK_EQ(static_cast<int>(subject->hash_field()),
- static_cast<int>(hash));
-#endif
- subject->set_hash_field(hash);
- }
- return Smi::FromInt(d);
- }
- }
-
- // Slower case.
- int flags = ALLOW_HEX;
- if (FLAG_harmony_numeric_literals) {
- // The current spec draft has not updated "ToNumber Applied to the String
- // Type", https://bugs.ecmascript.org/show_bug.cgi?id=1584
- flags |= ALLOW_OCTAL | ALLOW_BINARY;
- }
-
- return *isolate->factory()->NewNumber(StringToDouble(
- isolate->unicode_cache(), *subject, flags));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(length, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
- if (length == 0) return isolate->heap()->empty_string();
- Handle<String> result;
- if (is_one_byte) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(length));
- }
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_TruncateString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
- CONVERT_SMI_ARG_CHECKED(new_length, 1);
- RUNTIME_ASSERT(new_length >= 0);
- return *SeqString::Truncate(string, new_length);
-}
-
-
-RUNTIME_FUNCTION(Runtime_URIEscape) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = String::Flatten(source);
- DCHECK(string->IsFlat());
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- string->IsOneByteRepresentationUnderneath()
- ? URIEscape::Escape<uint8_t>(isolate, source)
- : URIEscape::Escape<uc16>(isolate, source));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_URIUnescape) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = String::Flatten(source);
- DCHECK(string->IsFlat());
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- string->IsOneByteRepresentationUnderneath()
- ? URIUnescape::Unescape<uint8_t>(isolate, source)
- : URIUnescape::Unescape<uc16>(isolate, source));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_QuoteJSONString) {
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- DCHECK(args.length() == 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, BasicJsonStringifier::StringifyString(isolate, string));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- BasicJsonStringifier stringifier(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, stringifier.Stringify(object));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringParseInt) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
- RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
-
- subject = String::Flatten(subject);
- double value;
-
- { DisallowHeapAllocation no_gc;
- String::FlatContent flat = subject->GetFlatContent();
-
- // ECMA-262 section 15.1.2.3, empty string is NaN
- if (flat.IsAscii()) {
- value = StringToInt(
- isolate->unicode_cache(), flat.ToOneByteVector(), radix);
- } else {
- value = StringToInt(
- isolate->unicode_cache(), flat.ToUC16Vector(), radix);
- }
- }
-
- return *isolate->factory()->NewNumber(value);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringParseFloat) {
- HandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-
- subject = String::Flatten(subject);
- double value = StringToDouble(isolate->unicode_cache(), *subject,
- ALLOW_TRAILING_JUNK, base::OS::nan_value());
-
- return *isolate->factory()->NewNumber(value);
-}
-
-
-static inline bool ToUpperOverflows(uc32 character) {
- // y with umlauts and the micro sign are the only characters that stop
- // fitting into one-byte when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- static const uc32 micro_code = 0xb5;
- return (character == yuml_code || character == micro_code);
-}
-
-
-template <class Converter>
-MUST_USE_RESULT static Object* ConvertCaseHelper(
- Isolate* isolate,
- String* string,
- SeqString* result,
- int result_length,
- unibrow::Mapping<Converter, 128>* mapping) {
- DisallowHeapAllocation no_gc;
- // We try this twice, once with the assumption that the result is no longer
- // than the input and, if that assumption breaks, again with the exact
- // length. This may not be pretty, but it is nicer than what was here before
- // and I hereby claim my vaffel-is.
- //
- // NOTE: This assumes that the upper/lower case of an ASCII
- // character is also ASCII. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- bool has_changed_character = false;
-
- // Convert all characters to upper case, assuming that they will fit
- // in the buffer
- Access<ConsStringIteratorOp> op(
- isolate->runtime_state()->string_iterator());
- StringCharacterStream stream(string, op.value());
- unibrow::uchar chars[Converter::kMaxWidth];
- // We can assume that the string is not empty
- uc32 current = stream.GetNext();
- bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
- for (int i = 0; i < result_length;) {
- bool has_next = stream.HasMore();
- uc32 next = has_next ? stream.GetNext() : 0;
- int char_length = mapping->get(current, next, chars);
- if (char_length == 0) {
- // The case conversion of this character is the character itself.
- result->Set(i, current);
- i++;
- } else if (char_length == 1 &&
- (ignore_overflow || !ToUpperOverflows(current))) {
- // Common case: converting the letter resulted in one character.
- DCHECK(static_cast<uc32>(chars[0]) != current);
- result->Set(i, chars[0]);
- has_changed_character = true;
- i++;
- } else if (result_length == string->length()) {
- bool overflows = ToUpperOverflows(current);
- // We've assumed that the result would be as long as the
- // input but here is a character that converts to several
- // characters. No matter, we calculate the exact length
- // of the result and try the whole thing again.
- //
- // Note that this leaves room for optimization. We could just
- // memcpy what we already have to the result string. Also,
- // the result string is the last object allocated we could
- // "realloc" it and probably, in the vast majority of cases,
- // extend the existing string to be able to hold the full
- // result.
- int next_length = 0;
- if (has_next) {
- next_length = mapping->get(next, 0, chars);
- if (next_length == 0) next_length = 1;
- }
- int current_length = i + char_length + next_length;
- while (stream.HasMore()) {
- current = stream.GetNext();
- overflows |= ToUpperOverflows(current);
- // NOTE: we use 0 as the next character here because, while
- // the next character may affect what a character converts to,
- // it does not in any case affect the length of what it convert
- // to.
- int char_length = mapping->get(current, 0, chars);
- if (char_length == 0) char_length = 1;
- current_length += char_length;
- if (current_length > String::kMaxLength) {
- AllowHeapAllocation allocate_error_and_return;
- return isolate->ThrowInvalidStringLength();
- }
- }
- // Try again with the real length. Return signed if we need
- // to allocate a two-byte string for to uppercase.
- return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
- : Smi::FromInt(current_length);
- } else {
- for (int j = 0; j < char_length; j++) {
- result->Set(i, chars[j]);
- i++;
- }
- has_changed_character = true;
- }
- current = next;
- }
- if (has_changed_character) {
- return result;
- } else {
- // If we didn't actually change anything in doing the conversion
- // we simple return the result and let the converted string
- // become garbage; there is no reason to keep two identical strings
- // alive.
- return string;
- }
-}
-
-
-namespace {
-
-static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
-
-// Given a word and two range boundaries returns a word with high bit
-// set in every byte iff the corresponding input byte was strictly in
-// the range (m, n). All the other bits in the result are cleared.
-// This function is only useful when it can be inlined and the
-// boundaries are statically known.
-// Requires: all bytes in the input word and the boundaries must be
-// ASCII (less than 0x7F).
-static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Use strict inequalities since in edge cases the function could be
- // further simplified.
- DCHECK(0 < m && m < n);
- // Has high bit set in every w byte less than n.
- uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
- // Has high bit set in every w byte greater than m.
- uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
- return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
-}
-
-
-#ifdef DEBUG
-static bool CheckFastAsciiConvert(char* dst,
- const char* src,
- int length,
- bool changed,
- bool is_to_lower) {
- bool expected_changed = false;
- for (int i = 0; i < length; i++) {
- if (dst[i] == src[i]) continue;
- expected_changed = true;
- if (is_to_lower) {
- DCHECK('A' <= src[i] && src[i] <= 'Z');
- DCHECK(dst[i] == src[i] + ('a' - 'A'));
- } else {
- DCHECK('a' <= src[i] && src[i] <= 'z');
- DCHECK(dst[i] == src[i] - ('a' - 'A'));
- }
- }
- return (expected_changed == changed);
-}
-#endif
-
-
-template<class Converter>
-static bool FastAsciiConvert(char* dst,
- const char* src,
- int length,
- bool* changed_out) {
-#ifdef DEBUG
- char* saved_dst = dst;
- const char* saved_src = src;
-#endif
- DisallowHeapAllocation no_gc;
- // We rely on the distance between upper and lower case letters
- // being a known power of 2.
- DCHECK('a' - 'A' == (1 << 5));
- // Boundaries for the range of input characters than require conversion.
- static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1;
- static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
- bool changed = false;
- uintptr_t or_acc = 0;
- const char* const limit = src + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- // Process the prefix of the input that requires no conversion one
- // (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
- or_acc |= w;
- if (AsciiRangeMask(w, lo, hi) != 0) {
- changed = true;
- break;
- }
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
- // Process the remainder of the input performing conversion when
- // required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
- or_acc |= w;
- uintptr_t m = AsciiRangeMask(w, lo, hi);
- // The mask has high (7th) bit set in every byte that needs
- // conversion and we know that the distance between cases is
- // 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
-#endif
- // Process the last few bytes of the input (or the whole input if
- // unaligned access is not supported).
- while (src < limit) {
- char c = *src;
- or_acc |= c;
- if (lo < c && c < hi) {
- c ^= (1 << 5);
- changed = true;
- }
- *dst = c;
- ++src;
- ++dst;
- }
- if ((or_acc & kAsciiMask) != 0) {
- return false;
- }
-
- DCHECK(CheckFastAsciiConvert(
- saved_dst, saved_src, length, changed, Converter::kIsToLower));
-
- *changed_out = changed;
- return true;
-}
-
-} // namespace
-
-
-template <class Converter>
-MUST_USE_RESULT static Object* ConvertCase(
- Handle<String> s,
- Isolate* isolate,
- unibrow::Mapping<Converter, 128>* mapping) {
- s = String::Flatten(s);
- int length = s->length();
- // Assume that the string is not empty; we need this assumption later
- if (length == 0) return *s;
-
- // Simpler handling of ASCII strings.
- //
- // NOTE: This assumes that the upper/lower case of an ASCII
- // character is also ASCII. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- if (s->IsOneByteRepresentationUnderneath()) {
- // Same length as input.
- Handle<SeqOneByteString> result =
- isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
- DisallowHeapAllocation no_gc;
- String::FlatContent flat_content = s->GetFlatContent();
- DCHECK(flat_content.IsFlat());
- bool has_changed_character = false;
- bool is_ascii = FastAsciiConvert<Converter>(
- reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
- length,
- &has_changed_character);
- // If not ASCII, we discard the result and take the 2 byte path.
- if (is_ascii) return has_changed_character ? *result : *s;
- }
-
- Handle<SeqString> result; // Same length as input.
- if (s->IsOneByteRepresentation()) {
- result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
- } else {
- result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
- }
-
- Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
- if (answer->IsException() || answer->IsString()) return answer;
-
- DCHECK(answer->IsSmi());
- length = Smi::cast(answer)->value();
- if (s->IsOneByteRepresentation() && length > 0) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
- } else {
- if (length < 0) length = -length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(length));
- }
- return ConvertCaseHelper(isolate, *s, *result, length, mapping);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- return ConvertCase(
- s, isolate, isolate->runtime_state()->to_lower_mapping());
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- return ConvertCase(
- s, isolate, isolate->runtime_state()->to_upper_mapping());
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringTrim) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
-
- string = String::Flatten(string);
- int length = string->length();
-
- int left = 0;
- UnicodeCache* unicode_cache = isolate->unicode_cache();
- if (trimLeft) {
- while (left < length &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
- left++;
- }
- }
-
- int right = length;
- if (trimRight) {
- while (right > left &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(
- string->Get(right - 1))) {
- right--;
- }
- }
-
- return *isolate->factory()->NewSubString(string, left, right);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringSplit) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
- RUNTIME_ASSERT(limit > 0);
-
- int subject_length = subject->length();
- int pattern_length = pattern->length();
- RUNTIME_ASSERT(pattern_length > 0);
-
- if (limit == 0xffffffffu) {
- Handle<Object> cached_answer(
- RegExpResultsCache::Lookup(isolate->heap(),
- *subject,
- *pattern,
- RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
- isolate);
- if (*cached_answer != Smi::FromInt(0)) {
- // The cache FixedArray is a COW-array and can therefore be reused.
- Handle<JSArray> result =
- isolate->factory()->NewJSArrayWithElements(
- Handle<FixedArray>::cast(cached_answer));
- return *result;
- }
- }
-
- // The limit can be very large (0xffffffffu), but since the pattern
- // isn't empty, we can never create more parts than ~half the length
- // of the subject.
-
- subject = String::Flatten(subject);
- pattern = String::Flatten(pattern);
-
- static const int kMaxInitialListCapacity = 16;
-
- ZoneScope zone_scope(isolate->runtime_zone());
-
- // Find (up to limit) indices of separator and end-of-string in subject
- int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity, zone_scope.zone());
-
- FindStringIndicesDispatch(isolate, *subject, *pattern,
- &indices, limit, zone_scope.zone());
-
- if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length, zone_scope.zone());
- }
-
- // The list indices now contains the end of each part to create.
-
- // Create JSArray of substrings separated by separator.
- int part_count = indices.length();
-
- Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- JSObject::EnsureCanContainHeapObjectElements(result);
- result->set_length(Smi::FromInt(part_count));
-
- DCHECK(result->HasFastObjectElements());
-
- if (part_count == 1 && indices.at(0) == subject_length) {
- FixedArray::cast(result->elements())->set(0, *subject);
- return *result;
- }
-
- Handle<FixedArray> elements(FixedArray::cast(result->elements()));
- int part_start = 0;
- for (int i = 0; i < part_count; i++) {
- HandleScope local_loop_handle(isolate);
- int part_end = indices.at(i);
- Handle<String> substring =
- isolate->factory()->NewProperSubString(subject, part_start, part_end);
- elements->set(i, *substring);
- part_start = part_end + pattern_length;
- }
-
- if (limit == 0xffffffffu) {
- if (result->HasFastObjectElements()) {
- RegExpResultsCache::Enter(isolate,
- subject,
- pattern,
- elements,
- RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
- }
- }
-
- return *result;
-}
-
-
-// Copies ASCII characters to the given fixed array looking up
-// one-char strings in the cache. Gives up on the first char that is
-// not in the cache and fills the remainder with smi zeros. Returns
-// the length of the successfully copied prefix.
-static int CopyCachedAsciiCharsToArray(Heap* heap,
- const uint8_t* chars,
- FixedArray* elements,
- int length) {
- DisallowHeapAllocation no_gc;
- FixedArray* ascii_cache = heap->single_character_string_cache();
- Object* undefined = heap->undefined_value();
- int i;
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
- for (i = 0; i < length; ++i) {
- Object* value = ascii_cache->get(chars[i]);
- if (value == undefined) break;
- elements->set(i, value, mode);
- }
- if (i < length) {
- DCHECK(Smi::FromInt(0) == 0);
- memset(elements->data_start() + i, 0, kPointerSize * (length - i));
- }
-#ifdef DEBUG
- for (int j = 0; j < length; ++j) {
- Object* element = elements->get(j);
- DCHECK(element == Smi::FromInt(0) ||
- (element->IsString() && String::cast(element)->LooksValid()));
- }
-#endif
- return i;
-}
-
-
-// Converts a String to JSArray.
-// For example, "foo" => ["f", "o", "o"].
-RUNTIME_FUNCTION(Runtime_StringToArray) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
-
- s = String::Flatten(s);
- const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));
-
- Handle<FixedArray> elements;
- int position = 0;
- if (s->IsFlat() && s->IsOneByteRepresentation()) {
- // Try using cached chars where possible.
- elements = isolate->factory()->NewUninitializedFixedArray(length);
-
- DisallowHeapAllocation no_gc;
- String::FlatContent content = s->GetFlatContent();
- if (content.IsAscii()) {
- Vector<const uint8_t> chars = content.ToOneByteVector();
- // Note, this will initialize all elements (not only the prefix)
- // to prevent GC from seeing partially initialized array.
- position = CopyCachedAsciiCharsToArray(isolate->heap(),
- chars.start(),
- *elements,
- length);
- } else {
- MemsetPointer(elements->data_start(),
- isolate->heap()->undefined_value(),
- length);
- }
- } else {
- elements = isolate->factory()->NewFixedArray(length);
- }
- for (int i = position; i < length; ++i) {
- Handle<Object> str =
- isolate->factory()->LookupSingleCharacterStringFromCode(s->Get(i));
- elements->set(i, *str);
- }
-
-#ifdef DEBUG
- for (int i = 0; i < length; ++i) {
- DCHECK(String::cast(elements->get(i))->length() == 1);
- }
-#endif
-
- return *isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-
RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -6848,91 +3266,6 @@ RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
}
-bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
- unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
- int char_length = runtime_state->to_upper_mapping()->get(ch, 0, chars);
- return char_length == 0;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToStringRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
-
- return *isolate->factory()->NumberToString(number);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
-
- return *isolate->factory()->NumberToString(number, false);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToInteger) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- return *isolate->factory()->NewNumber(DoubleToInteger(number));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- double double_value = DoubleToInteger(number);
- // Map both -0 and +0 to +0.
- if (double_value == 0) double_value = 0;
-
- return *isolate->factory()->NewNumber(double_value);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToJSUint32) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return *isolate->factory()->NewNumberFromUint(number);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToJSInt32) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- return *isolate->factory()->NewNumberFromInt(DoubleToInt32(number));
-}
-
-
-// Converts a Number to a Smi, if possible. Returns NaN if the number is not
-// a small integer.
-RUNTIME_FUNCTION(Runtime_NumberToSmi) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (obj->IsSmi()) {
- return obj;
- }
- if (obj->IsHeapNumber()) {
- double value = HeapNumber::cast(obj)->value();
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
- }
- return isolate->heap()->nan_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
@@ -6940,943 +3273,8 @@ RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
}
-RUNTIME_FUNCTION(Runtime_NumberAdd) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x + y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberSub) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x - y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberMul) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x * y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberUnaryMinus) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(-x);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberDiv) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x / y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberMod) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(modulo(x, y));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberImul) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- // We rely on implementation-defined behavior below, but at least not on
- // undefined behavior.
- CONVERT_NUMBER_CHECKED(uint32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(uint32_t, y, Int32, args[1]);
- int32_t product = static_cast<int32_t>(x * y);
- return *isolate->factory()->NewNumberFromInt(product);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringAdd) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
- isolate->counters()->string_add_runtime()->Increment();
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewConsString(str1, str2));
- return *result;
-}
-
-
-template <typename sinkchar>
-static inline void StringBuilderConcatHelper(String* special,
- sinkchar* sink,
- FixedArray* fixed_array,
- int array_length) {
- DisallowHeapAllocation no_gc;
- int position = 0;
- for (int i = 0; i < array_length; i++) {
- Object* element = fixed_array->get(i);
- if (element->IsSmi()) {
- // Smi encoding of position and length.
- int encoded_slice = Smi::cast(element)->value();
- int pos;
- int len;
- if (encoded_slice > 0) {
- // Position and length encoded in one smi.
- pos = StringBuilderSubstringPosition::decode(encoded_slice);
- len = StringBuilderSubstringLength::decode(encoded_slice);
- } else {
- // Position and length encoded in two smis.
- Object* obj = fixed_array->get(++i);
- DCHECK(obj->IsSmi());
- pos = Smi::cast(obj)->value();
- len = -encoded_slice;
- }
- String::WriteToFlat(special,
- sink + position,
- pos,
- pos + len);
- position += len;
- } else {
- String* string = String::cast(element);
- int element_length = string->length();
- String::WriteToFlat(string, sink + position, 0, element_length);
- position += element_length;
- }
- }
-}
-
-
-// Returns the result length of the concatenation.
-// On illegal argument, -1 is returned.
-static inline int StringBuilderConcatLength(int special_length,
- FixedArray* fixed_array,
- int array_length,
- bool* one_byte) {
- DisallowHeapAllocation no_gc;
- int position = 0;
- for (int i = 0; i < array_length; i++) {
- int increment = 0;
- Object* elt = fixed_array->get(i);
- if (elt->IsSmi()) {
- // Smi encoding of position and length.
- int smi_value = Smi::cast(elt)->value();
- int pos;
- int len;
- if (smi_value > 0) {
- // Position and length encoded in one smi.
- pos = StringBuilderSubstringPosition::decode(smi_value);
- len = StringBuilderSubstringLength::decode(smi_value);
- } else {
- // Position and length encoded in two smis.
- len = -smi_value;
- // Get the position and check that it is a positive smi.
- i++;
- if (i >= array_length) return -1;
- Object* next_smi = fixed_array->get(i);
- if (!next_smi->IsSmi()) return -1;
- pos = Smi::cast(next_smi)->value();
- if (pos < 0) return -1;
- }
- DCHECK(pos >= 0);
- DCHECK(len >= 0);
- if (pos > special_length || len > special_length - pos) return -1;
- increment = len;
- } else if (elt->IsString()) {
- String* element = String::cast(elt);
- int element_length = element->length();
- increment = element_length;
- if (*one_byte && !element->HasOnlyOneByteChars()) {
- *one_byte = false;
- }
- } else {
- return -1;
- }
- if (increment > String::kMaxLength - position) {
- return kMaxInt; // Provoke throw on allocation.
- }
- position += increment;
- }
- return position;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
- CONVERT_SMI_ARG_CHECKED(array_length, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
-
- size_t actual_array_length = 0;
- RUNTIME_ASSERT(
- TryNumberToSize(isolate, array->length(), &actual_array_length));
- RUNTIME_ASSERT(array_length >= 0);
- RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length);
-
- // This assumption is used by the slice encoding in one or two smis.
- DCHECK(Smi::kMaxValue >= String::kMaxLength);
-
- RUNTIME_ASSERT(array->HasFastElements());
- JSObject::EnsureCanContainHeapObjectElements(array);
-
- int special_length = special->length();
- if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- int length;
- bool one_byte = special->HasOnlyOneByteChars();
-
- { DisallowHeapAllocation no_gc;
- FixedArray* fixed_array = FixedArray::cast(array->elements());
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
- }
-
- if (array_length == 0) {
- return isolate->heap()->empty_string();
- } else if (array_length == 1) {
- Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
- }
- length = StringBuilderConcatLength(
- special_length, fixed_array, array_length, &one_byte);
- }
-
- if (length == -1) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- if (one_byte) {
- Handle<SeqOneByteString> answer;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, answer,
- isolate->factory()->NewRawOneByteString(length));
- StringBuilderConcatHelper(*special,
- answer->GetChars(),
- FixedArray::cast(array->elements()),
- array_length);
- return *answer;
- } else {
- Handle<SeqTwoByteString> answer;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, answer,
- isolate->factory()->NewRawTwoByteString(length));
- StringBuilderConcatHelper(*special,
- answer->GetChars(),
- FixedArray::cast(array->elements()),
- array_length);
- return *answer;
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
- CONVERT_SMI_ARG_CHECKED(array_length, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
- RUNTIME_ASSERT(array->HasFastObjectElements());
- RUNTIME_ASSERT(array_length >= 0);
-
- Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
- }
-
- if (array_length == 0) {
- return isolate->heap()->empty_string();
- } else if (array_length == 1) {
- Object* first = fixed_array->get(0);
- RUNTIME_ASSERT(first->IsString());
- return first;
- }
-
- int separator_length = separator->length();
- RUNTIME_ASSERT(separator_length > 0);
- int max_nof_separators =
- (String::kMaxLength + separator_length - 1) / separator_length;
- if (max_nof_separators < (array_length - 1)) {
- return isolate->ThrowInvalidStringLength();
- }
- int length = (array_length - 1) * separator_length;
- for (int i = 0; i < array_length; i++) {
- Object* element_obj = fixed_array->get(i);
- RUNTIME_ASSERT(element_obj->IsString());
- String* element = String::cast(element_obj);
- int increment = element->length();
- if (increment > String::kMaxLength - length) {
- STATIC_ASSERT(String::kMaxLength < kMaxInt);
- length = kMaxInt; // Provoke exception;
- break;
- }
- length += increment;
- }
-
- Handle<SeqTwoByteString> answer;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, answer,
- isolate->factory()->NewRawTwoByteString(length));
-
- DisallowHeapAllocation no_gc;
-
- uc16* sink = answer->GetChars();
-#ifdef DEBUG
- uc16* end = sink + length;
-#endif
-
- RUNTIME_ASSERT(fixed_array->get(0)->IsString());
- String* first = String::cast(fixed_array->get(0));
- String* separator_raw = *separator;
- int first_length = first->length();
- String::WriteToFlat(first, sink, 0, first_length);
- sink += first_length;
-
- for (int i = 1; i < array_length; i++) {
- DCHECK(sink + separator_length <= end);
- String::WriteToFlat(separator_raw, sink, 0, separator_length);
- sink += separator_length;
-
- RUNTIME_ASSERT(fixed_array->get(i)->IsString());
- String* element = String::cast(fixed_array->get(i));
- int element_length = element->length();
- DCHECK(sink + element_length <= end);
- String::WriteToFlat(element, sink, 0, element_length);
- sink += element_length;
- }
- DCHECK(sink == end);
-
- // Use %_FastAsciiArrayJoin instead.
- DCHECK(!answer->IsOneByteRepresentation());
- return *answer;
-}
-
-template <typename Char>
-static void JoinSparseArrayWithSeparator(FixedArray* elements,
- int elements_length,
- uint32_t array_length,
- String* separator,
- Vector<Char> buffer) {
- DisallowHeapAllocation no_gc;
- int previous_separator_position = 0;
- int separator_length = separator->length();
- int cursor = 0;
- for (int i = 0; i < elements_length; i += 2) {
- int position = NumberToInt32(elements->get(i));
- String* string = String::cast(elements->get(i + 1));
- int string_length = string->length();
- if (string->length() > 0) {
- while (previous_separator_position < position) {
- String::WriteToFlat<Char>(separator, &buffer[cursor],
- 0, separator_length);
- cursor += separator_length;
- previous_separator_position++;
- }
- String::WriteToFlat<Char>(string, &buffer[cursor],
- 0, string_length);
- cursor += string->length();
- }
- }
- if (separator_length > 0) {
- // Array length must be representable as a signed 32-bit number,
- // otherwise the total string length would have been too large.
- DCHECK(array_length <= 0x7fffffff); // Is int32_t.
- int last_array_index = static_cast<int>(array_length - 1);
- while (previous_separator_position < last_array_index) {
- String::WriteToFlat<Char>(separator, &buffer[cursor],
- 0, separator_length);
- cursor += separator_length;
- previous_separator_position++;
- }
- }
- DCHECK(cursor <= buffer.length());
-}
-
-
-RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
- CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
- // elements_array is fast-mode JSarray of alternating positions
- // (increasing order) and strings.
- RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
- // array_length is length of original array (used to add separators);
- // separator is string to put between elements. Assumed to be non-empty.
- RUNTIME_ASSERT(array_length > 0);
-
- // Find total length of join result.
- int string_length = 0;
- bool is_ascii = separator->IsOneByteRepresentation();
- bool overflow = false;
- CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
- RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
- RUNTIME_ASSERT((elements_length & 1) == 0); // Even length.
- FixedArray* elements = FixedArray::cast(elements_array->elements());
- for (int i = 0; i < elements_length; i += 2) {
- RUNTIME_ASSERT(elements->get(i)->IsNumber());
- CONVERT_NUMBER_CHECKED(uint32_t, position, Uint32, elements->get(i));
- RUNTIME_ASSERT(position < array_length);
- RUNTIME_ASSERT(elements->get(i + 1)->IsString());
- }
-
- { DisallowHeapAllocation no_gc;
- for (int i = 0; i < elements_length; i += 2) {
- String* string = String::cast(elements->get(i + 1));
- int length = string->length();
- if (is_ascii && !string->IsOneByteRepresentation()) {
- is_ascii = false;
- }
- if (length > String::kMaxLength ||
- String::kMaxLength - length < string_length) {
- overflow = true;
- break;
- }
- string_length += length;
- }
- }
-
- int separator_length = separator->length();
- if (!overflow && separator_length > 0) {
- if (array_length <= 0x7fffffffu) {
- int separator_count = static_cast<int>(array_length) - 1;
- int remaining_length = String::kMaxLength - string_length;
- if ((remaining_length / separator_length) >= separator_count) {
- string_length += separator_length * (array_length - 1);
- } else {
- // Not room for the separators within the maximal string length.
- overflow = true;
- }
- } else {
- // Nonempty separator and at least 2^31-1 separators necessary
- // means that the string is too large to create.
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
- overflow = true;
- }
- }
- if (overflow) {
- // Throw an exception if the resulting string is too large. See
- // https://code.google.com/p/chromium/issues/detail?id=336820
- // for details.
- return isolate->ThrowInvalidStringLength();
- }
-
- if (is_ascii) {
- Handle<SeqOneByteString> result = isolate->factory()->NewRawOneByteString(
- string_length).ToHandleChecked();
- JoinSparseArrayWithSeparator<uint8_t>(
- FixedArray::cast(elements_array->elements()),
- elements_length,
- array_length,
- *separator,
- Vector<uint8_t>(result->GetChars(), string_length));
- return *result;
- } else {
- Handle<SeqTwoByteString> result = isolate->factory()->NewRawTwoByteString(
- string_length).ToHandleChecked();
- JoinSparseArrayWithSeparator<uc16>(
- FixedArray::cast(elements_array->elements()),
- elements_length,
- array_length,
- *separator,
- Vector<uc16>(result->GetChars(), string_length));
- return *result;
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberOr) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x | y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberAnd) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x & y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberXor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x ^ y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberShl) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x << (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberShr) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromUint(x >> (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberSar) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(
- ArithmeticShiftRight(x, y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberEquals) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (std::isnan(x)) return Smi::FromInt(NOT_EQUAL);
- if (std::isnan(y)) return Smi::FromInt(NOT_EQUAL);
- if (x == y) return Smi::FromInt(EQUAL);
- Object* result;
- if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
- result = Smi::FromInt(EQUAL);
- } else {
- result = Smi::FromInt(NOT_EQUAL);
- }
- return result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringEquals) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
-
- bool not_equal = !String::Equals(x, y);
- // This is slightly convoluted because the value that signifies
- // equality is 0 and inequality is 1 so we have to negate the result
- // from String::Equals.
- DCHECK(not_equal == 0 || not_equal == 1);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(NOT_EQUAL == 1);
- return Smi::FromInt(not_equal);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberCompare) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, uncomparable_result, 2)
- if (std::isnan(x) || std::isnan(y)) return *uncomparable_result;
- if (x == y) return Smi::FromInt(EQUAL);
- if (isless(x, y)) return Smi::FromInt(LESS);
- return Smi::FromInt(GREATER);
-}
-
-
-// Compare two Smis as if they were converted to strings and then
-// compared lexicographically.
-RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(x_value, 0);
- CONVERT_SMI_ARG_CHECKED(y_value, 1);
-
- // If the integers are equal so are the string representations.
- if (x_value == y_value) return Smi::FromInt(EQUAL);
-
- // If one of the integers is zero the normal integer order is the
- // same as the lexicographic order of the string representations.
- if (x_value == 0 || y_value == 0)
- return Smi::FromInt(x_value < y_value ? LESS : GREATER);
-
- // If only one of the integers is negative the negative number is
- // smallest because the char code of '-' is less than the char code
- // of any digit. Otherwise, we make both values positive.
-
- // Use unsigned values otherwise the logic is incorrect for -MIN_INT on
- // architectures using 32-bit Smis.
- uint32_t x_scaled = x_value;
- uint32_t y_scaled = y_value;
- if (x_value < 0 || y_value < 0) {
- if (y_value >= 0) return Smi::FromInt(LESS);
- if (x_value >= 0) return Smi::FromInt(GREATER);
- x_scaled = -x_value;
- y_scaled = -y_value;
- }
-
- static const uint32_t kPowersOf10[] = {
- 1, 10, 100, 1000, 10*1000, 100*1000,
- 1000*1000, 10*1000*1000, 100*1000*1000,
- 1000*1000*1000
- };
-
- // If the integers have the same number of decimal digits they can be
- // compared directly as the numeric order is the same as the
- // lexicographic order. If one integer has fewer digits, it is scaled
- // by some power of 10 to have the same number of digits as the longer
- // integer. If the scaled integers are equal it means the shorter
- // integer comes first in the lexicographic order.
-
- // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
- int x_log2 = IntegerLog2(x_scaled);
- int x_log10 = ((x_log2 + 1) * 1233) >> 12;
- x_log10 -= x_scaled < kPowersOf10[x_log10];
-
- int y_log2 = IntegerLog2(y_scaled);
- int y_log10 = ((y_log2 + 1) * 1233) >> 12;
- y_log10 -= y_scaled < kPowersOf10[y_log10];
-
- int tie = EQUAL;
-
- if (x_log10 < y_log10) {
- // X has fewer digits. We would like to simply scale up X but that
- // might overflow, e.g when comparing 9 with 1_000_000_000, 9 would
- // be scaled up to 9_000_000_000. So we scale up by the next
- // smallest power and scale down Y to drop one digit. It is OK to
- // drop one digit from the longer integer since the final digit is
- // past the length of the shorter integer.
- x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
- y_scaled /= 10;
- tie = LESS;
- } else if (y_log10 < x_log10) {
- y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
- x_scaled /= 10;
- tie = GREATER;
- }
-
- if (x_scaled < y_scaled) return Smi::FromInt(LESS);
- if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
- return Smi::FromInt(tie);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringCompare) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
-
- isolate->counters()->string_compare_runtime()->Increment();
-
- // A few fast case tests before we flatten.
- if (x.is_identical_to(y)) return Smi::FromInt(EQUAL);
- if (y->length() == 0) {
- if (x->length() == 0) return Smi::FromInt(EQUAL);
- return Smi::FromInt(GREATER);
- } else if (x->length() == 0) {
- return Smi::FromInt(LESS);
- }
-
- int d = x->Get(0) - y->Get(0);
- if (d < 0) return Smi::FromInt(LESS);
- else if (d > 0) return Smi::FromInt(GREATER);
-
- // Slow case.
- x = String::Flatten(x);
- y = String::Flatten(y);
-
- DisallowHeapAllocation no_gc;
- Object* equal_prefix_result = Smi::FromInt(EQUAL);
- int prefix_length = x->length();
- if (y->length() < prefix_length) {
- prefix_length = y->length();
- equal_prefix_result = Smi::FromInt(GREATER);
- } else if (y->length() > prefix_length) {
- equal_prefix_result = Smi::FromInt(LESS);
- }
- int r;
- String::FlatContent x_content = x->GetFlatContent();
- String::FlatContent y_content = y->GetFlatContent();
- if (x_content.IsAscii()) {
- Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
- if (y_content.IsAscii()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- } else {
- Vector<const uc16> x_chars = x_content.ToUC16Vector();
- if (y_content.IsAscii()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- }
- Object* result;
- if (r == 0) {
- result = equal_prefix_result;
- } else {
- result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
- }
- return result;
-}
-
-
-#define RUNTIME_UNARY_MATH(Name, name) \
-RUNTIME_FUNCTION(Runtime_Math##Name) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
- isolate->counters()->math_##name()->Increment(); \
- CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
- return *isolate->factory()->NewHeapNumber(std::name(x)); \
-}
-
-RUNTIME_UNARY_MATH(Acos, acos)
-RUNTIME_UNARY_MATH(Asin, asin)
-RUNTIME_UNARY_MATH(Atan, atan)
-RUNTIME_UNARY_MATH(LogRT, log)
-#undef RUNTIME_UNARY_MATH
-
-
-RUNTIME_FUNCTION(Runtime_DoubleHi) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- uint64_t integer = double_to_uint64(x);
- integer = (integer >> 32) & 0xFFFFFFFFu;
- return *isolate->factory()->NewNumber(static_cast<int32_t>(integer));
-}
-
-
-RUNTIME_FUNCTION(Runtime_DoubleLo) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(
- static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ConstructDouble) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
- uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
- return *isolate->factory()->NewNumber(uint64_to_double(result));
-}
-
-
-RUNTIME_FUNCTION(Runtime_RemPiO2) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- Factory* factory = isolate->factory();
- double y[2];
- int n = fdlibm::rempio2(x, y);
- Handle<FixedArray> array = factory->NewFixedArray(3);
- Handle<HeapNumber> y0 = factory->NewHeapNumber(y[0]);
- Handle<HeapNumber> y1 = factory->NewHeapNumber(y[1]);
- array->set(0, Smi::FromInt(n));
- array->set(1, *y0);
- array->set(2, *y1);
- return *factory->NewJSArrayWithElements(array);
-}
-
-
-static const double kPiDividedBy4 = 0.78539816339744830962;
-
-
-RUNTIME_FUNCTION(Runtime_MathAtan2) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- isolate->counters()->math_atan2()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result;
- if (std::isinf(x) && std::isinf(y)) {
- // Make sure that the result in case of two infinite arguments
- // is a multiple of Pi / 4. The sign of the result is determined
- // by the first argument (x) and the sign of the second argument
- // determines the multiplier: one or three.
- int multiplier = (x < 0) ? -1 : 1;
- if (y < 0) multiplier *= 3;
- result = multiplier * kPiDividedBy4;
- } else {
- result = std::atan2(x, y);
- }
- return *isolate->factory()->NewNumber(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathExpRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_exp()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_exp();
- return *isolate->factory()->NewNumber(fast_exp(x));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathFloorRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_floor()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(Floor(x));
-}
-
-
-// Slow version of Math.pow. We check for fast paths for special cases.
-// Used if VFP3 is not available.
-RUNTIME_FUNCTION(Runtime_MathPowSlow) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-
- // If the second argument is a smi, it is much faster to call the
- // custom powi() function than the generic pow().
- if (args[1]->IsSmi()) {
- int y = args.smi_at(1);
- return *isolate->factory()->NewNumber(power_double_int(x, y));
- }
-
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result = power_helper(x, y);
- if (std::isnan(result)) return isolate->heap()->nan_value();
- return *isolate->factory()->NewNumber(result);
-}
-// Fast version of Math.pow if we know that y is not an integer and y is not
-// -0.5 or 0.5. Used as slow case from full codegen.
-RUNTIME_FUNCTION(Runtime_MathPowRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (y == 0) {
- return Smi::FromInt(1);
- } else {
- double result = power_double_double(x, y);
- if (std::isnan(result)) return isolate->heap()->nan_value();
- return *isolate->factory()->NewNumber(result);
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_RoundNumber) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
- isolate->counters()->math_round()->Increment();
-
- if (!input->IsHeapNumber()) {
- DCHECK(input->IsSmi());
- return *input;
- }
-
- Handle<HeapNumber> number = Handle<HeapNumber>::cast(input);
-
- double value = number->value();
- int exponent = number->get_exponent();
- int sign = number->get_sign();
-
- if (exponent < -1) {
- // Number in range ]-0.5..0.5[. These always round to +/-zero.
- if (sign) return isolate->heap()->minus_zero_value();
- return Smi::FromInt(0);
- }
-
- // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
- // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
- // argument holds for 32-bit smis).
- if (!sign && exponent < kSmiValueSize - 2) {
- return Smi::FromInt(static_cast<int>(value + 0.5));
- }
-
- // If the magnitude is big enough, there's no place for fraction part. If we
- // try to add 0.5 to this number, 1.0 will be added instead.
- if (exponent >= 52) {
- return *number;
- }
-
- if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
-
- // Do not call NumberFromDouble() to avoid extra checks.
- return *isolate->factory()->NewNumber(Floor(value + 0.5));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathSqrtRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_sqrt()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(fast_sqrt(x));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathFround) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- float xf = static_cast<float>(x);
- return *isolate->factory()->NewNumber(xf);
-}
-
RUNTIME_FUNCTION(Runtime_DateMakeDay) {
SealHandleScope shs(isolate);
@@ -7901,23 +3299,22 @@ RUNTIME_FUNCTION(Runtime_DateSetValue) {
DateCache* date_cache = isolate->date_cache();
- Handle<Object> value;;
+ Handle<Object> value;
+ ;
bool is_value_nan = false;
if (std::isnan(time)) {
value = isolate->factory()->nan_value();
is_value_nan = true;
- } else if (!is_utc &&
- (time < -DateCache::kMaxTimeBeforeUTCInMs ||
- time > DateCache::kMaxTimeBeforeUTCInMs)) {
+ } else if (!is_utc && (time < -DateCache::kMaxTimeBeforeUTCInMs ||
+ time > DateCache::kMaxTimeBeforeUTCInMs)) {
value = isolate->factory()->nan_value();
is_value_nan = true;
} else {
time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
- if (time < -DateCache::kMaxTimeInMs ||
- time > DateCache::kMaxTimeInMs) {
+ if (time < -DateCache::kMaxTimeInMs || time > DateCache::kMaxTimeInMs) {
value = isolate->factory()->nan_value();
is_value_nan = true;
- } else {
+ } else {
value = isolate->factory()->NewNumber(DoubleToInteger(time));
}
}
@@ -7940,8 +3337,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
int mapped_count = Min(argument_count, parameter_count);
Handle<FixedArray> parameter_map =
isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
- parameter_map->set_map(
- isolate->heap()->sloppy_arguments_elements_map());
+ parameter_map->set_map(isolate->heap()->sloppy_arguments_elements_map());
Handle<Map> map = Map::Copy(handle(result->map()));
map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
@@ -7996,8 +3392,9 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
}
DCHECK(context_index >= 0);
arguments->set_the_hole(index);
- parameter_map->set(index + 2, Smi::FromInt(
- Context::MIN_CONTEXT_SLOTS + context_index));
+ parameter_map->set(
+ index + 2,
+ Smi::FromInt(Context::MIN_CONTEXT_SLOTS + context_index));
}
--index;
@@ -8083,8 +3480,8 @@ RUNTIME_FUNCTION(Runtime_NewClosureFromStubFailure) {
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
Handle<Context> context(isolate->context());
PretenureFlag pretenure_flag = NOT_TENURED;
- return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, pretenure_flag);
+ return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
+ pretenure_flag);
}
@@ -8098,18 +3495,17 @@ RUNTIME_FUNCTION(Runtime_NewClosure) {
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
- return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, pretenure_flag);
+ return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
+ pretenure_flag);
}
// Find the arguments of the JavaScript function invocation that called
// into C++ code. Collect these in a newly allocated array of handles (possibly
// prefixed by a number of empty handles).
-static SmartArrayPointer<Handle<Object> > GetCallerArguments(
- Isolate* isolate,
- int prefix_argc,
- int* total_argc) {
+static SmartArrayPointer<Handle<Object> > GetCallerArguments(Isolate* isolate,
+ int prefix_argc,
+ int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
@@ -8119,8 +3515,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
int inlined_jsframe_index = functions.length() - 1;
JSFunction* inlined_function = functions[inlined_jsframe_index];
SlotRefValueBuilder slot_refs(
- frame,
- inlined_jsframe_index,
+ frame, inlined_jsframe_index,
inlined_function->shared()->formal_parameter_count());
int args_count = slot_refs.args_length();
@@ -8215,9 +3610,8 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
PropertyAttributes attr =
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- bound_function, length_string, new_length, attr));
+ isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+ bound_function, length_string, new_length, attr));
return *bound_function;
}
@@ -8260,8 +3654,8 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
SmartArrayPointer<Handle<Object> > param_data =
GetCallerArguments(isolate, bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
- param_data[i] = Handle<Object>(bound_args->get(
- JSFunction::kBoundArgumentsStartIndex + i), isolate);
+ param_data[i] = Handle<Object>(
+ bound_args->get(JSFunction::kBoundArgumentsStartIndex + i), isolate);
}
if (!bound_function->IsJSFunction()) {
@@ -8273,22 +3667,20 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::New(Handle<JSFunction>::cast(bound_function),
- total_argc, param_data.get()));
+ isolate, result, Execution::New(Handle<JSFunction>::cast(bound_function),
+ total_argc, param_data.get()));
return *result;
}
static Object* Runtime_NewObjectHelper(Isolate* isolate,
- Handle<Object> constructor,
- Handle<AllocationSite> site) {
+ Handle<Object> constructor,
+ Handle<AllocationSite> site) {
// If the constructor isn't a proper function we throw a type error.
if (!constructor->IsJSFunction()) {
- Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_constructor", arguments);
- return isolate->Throw(*type_error);
+ Vector<Handle<Object> > arguments = HandleVector(&constructor, 1);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError("not_constructor", arguments));
}
Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
@@ -8296,10 +3688,9 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
// If function should not have prototype, construction is not allowed. In this
// case generated code bailouts here, since function has no initial_map.
if (!function->should_have_prototype() && !function->shared()->bound()) {
- Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_constructor", arguments);
- return isolate->Throw(*type_error);
+ Vector<Handle<Object> > arguments = HandleVector(&constructor, 1);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError("not_constructor", arguments));
}
Debug* debug = isolate->debug();
@@ -8347,8 +3738,7 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
- return Runtime_NewObjectHelper(isolate,
- constructor,
+ return Runtime_NewObjectHelper(isolate, constructor,
Handle<AllocationSite>::null());
}
@@ -8378,476 +3768,6 @@ RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
}
-RUNTIME_FUNCTION(Runtime_CompileUnoptimized) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-#ifdef DEBUG
- if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
- PrintF("[unoptimized: ");
- function->PrintName();
- PrintF("]\n");
- }
-#endif
-
- // Compile the target function.
- DCHECK(function->shared()->allows_lazy_compilation());
-
- Handle<Code> code;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
- Compiler::GetUnoptimizedCode(function));
- function->ReplaceCode(*code);
-
- // All done. Return the compiled code.
- DCHECK(function->is_compiled());
- DCHECK(function->code()->kind() == Code::FUNCTION ||
- (FLAG_always_opt &&
- function->code()->kind() == Code::OPTIMIZED_FUNCTION));
- return *code;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CompileOptimized) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
-
- Handle<Code> unoptimized(function->shared()->code());
- if (!function->shared()->is_compiled()) {
- // If the function is not compiled, do not optimize.
- // This can happen if the debugger is activated and
- // the function is returned to the not compiled state.
- // TODO(yangguo): reconsider this.
- function->ReplaceCode(function->shared()->code());
- } else if (!isolate->use_crankshaft() ||
- function->shared()->optimization_disabled() ||
- isolate->DebuggerHasBreakPoints()) {
- // If the function is not optimizable or debugger is active continue
- // using the code from the full compiler.
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
- function->shared()->optimization_disabled() ? "F" : "T",
- isolate->DebuggerHasBreakPoints() ? "T" : "F");
- }
- function->ReplaceCode(*unoptimized);
- } else {
- Compiler::ConcurrencyMode mode = concurrent ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
- Handle<Code> code;
- if (Compiler::GetOptimizedCode(
- function, unoptimized, mode).ToHandle(&code)) {
- function->ReplaceCode(*code);
- } else {
- function->ReplaceCode(*unoptimized);
- }
- }
-
- DCHECK(function->code()->kind() == Code::FUNCTION ||
- function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
- function->IsInOptimizationQueue());
- return function->code();
-}
-
-
-class ActivationsFinder : public ThreadVisitor {
- public:
- Code* code_;
- bool has_code_activations_;
-
- explicit ActivationsFinder(Code* code)
- : code_(code),
- has_code_activations_(false) { }
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- JavaScriptFrameIterator it(isolate, top);
- VisitFrames(&it);
- }
-
- void VisitFrames(JavaScriptFrameIterator* it) {
- for (; !it->done(); it->Advance()) {
- JavaScriptFrame* frame = it->frame();
- if (code_->contains(frame->pc())) has_code_activations_ = true;
- }
- }
-};
-
-
-RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- DCHECK(AllowHeapAllocation::IsAllowed());
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_SMI_ARG_CHECKED(type_arg, 0);
- Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(type_arg);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- DCHECK(AllowHeapAllocation::IsAllowed());
-
- Handle<JSFunction> function = deoptimizer->function();
- Handle<Code> optimized_code = deoptimizer->compiled_code();
-
- DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(type == deoptimizer->bailout_type());
-
- // Make sure to materialize objects before causing any allocation.
- JavaScriptFrameIterator it(isolate);
- deoptimizer->MaterializeHeapObjects(&it);
- delete deoptimizer;
-
- JavaScriptFrame* frame = it.frame();
- RUNTIME_ASSERT(frame->function()->IsJSFunction());
- DCHECK(frame->function() == *function);
-
- // Avoid doing too much work when running with --always-opt and keep
- // the optimized code around.
- if (FLAG_always_opt || type == Deoptimizer::LAZY) {
- return isolate->heap()->undefined_value();
- }
-
- // Search for other activations of the same function and code.
- ActivationsFinder activations_finder(*optimized_code);
- activations_finder.VisitFrames(&it);
- isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
-
- if (!activations_finder.has_code_activations_) {
- if (function->code() == *optimized_code) {
- if (FLAG_trace_deopt) {
- PrintF("[removing optimized code for: ");
- function->PrintName();
- PrintF("]\n");
- }
- function->ReplaceCode(function->shared()->code());
- // Evict optimized code for this function from the cache so that it
- // doesn't get used for new closures.
- function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
- "notify deoptimized");
- }
- } else {
- // TODO(titzer): we should probably do DeoptimizeCodeList(code)
- // unconditionally if the code is not already marked for deoptimization.
- // If there is an index by shared function info, all the better.
- Deoptimizer::DeoptimizeFunction(*function);
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimized()) return isolate->heap()->undefined_value();
-
- // TODO(turbofan): Deoptimization is not supported yet.
- if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
- return isolate->heap()->undefined_value();
- }
-
- Deoptimizer::DeoptimizeFunction(*function);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->shared()->ClearTypeFeedbackInfo();
- Code* unoptimized = function->shared()->code();
- if (unoptimized->kind() == Code::FUNCTION) {
- unoptimized->ClearInlineCaches();
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
-#if defined(USE_SIMULATOR)
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- return isolate->heap()->ToBoolean(
- isolate->concurrent_recompilation_enabled());
-}
-
-
-RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
- HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
- if (!function->IsOptimizable() &&
- !function->IsMarkedForConcurrentOptimization() &&
- !function->IsInOptimizationQueue()) {
- return isolate->heap()->undefined_value();
- }
-
- function->MarkForOptimization();
-
- Code* unoptimized = function->shared()->code();
- if (args.length() == 2 &&
- unoptimized->kind() == Code::FUNCTION) {
- CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")) && FLAG_use_osr) {
- // Start patching from the currently patched loop nesting level.
- DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
- isolate->runtime_profiler()->AttemptOnStackReplacement(
- *function, Code::kMaxLoopNestingMarker);
- } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
- isolate->concurrent_recompilation_enabled()) {
- function->MarkForConcurrentOptimization();
- }
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- function->shared()->set_optimization_disabled(true);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
- HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
- if (!isolate->use_crankshaft()) {
- return Smi::FromInt(4); // 4 == "never".
- }
- bool sync_with_compiler_thread = true;
- if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, sync, 1);
- if (sync->IsOneByteEqualTo(STATIC_ASCII_VECTOR("no sync"))) {
- sync_with_compiler_thread = false;
- }
- }
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (isolate->concurrent_recompilation_enabled() &&
- sync_with_compiler_thread) {
- while (function->IsInOptimizationQueue()) {
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- base::OS::Sleep(50);
- }
- }
- if (FLAG_always_opt) {
- // We may have always opt, but that is more best-effort than a real
- // promise, so we still say "no" if it is not optimized.
- return function->IsOptimized() ? Smi::FromInt(3) // 3 == "always".
- : Smi::FromInt(2); // 2 == "no".
- }
- if (FLAG_deopt_every_n_times) {
- return Smi::FromInt(6); // 6 == "maybe deopted".
- }
- if (function->IsOptimized() && function->code()->is_turbofanned()) {
- return Smi::FromInt(7); // 7 == "TurboFan compiler".
- }
- return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
- : Smi::FromInt(2); // 2 == "no".
-}
-
-
-RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
- DCHECK(args.length() == 0);
- RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
- RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compiler_thread()->Unblock();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return Smi::FromInt(function->shared()->opt_count());
-}
-
-
-static bool IsSuitableForOnStackReplacement(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Code> current_code) {
- // Keep track of whether we've succeeded in optimizing.
- if (!isolate->use_crankshaft() || !current_code->optimizable()) return false;
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == *function) return false;
- }
-
- return true;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Handle<Code> caller_code(function->shared()->code());
-
- // We're not prepared to handle a function with arguments object.
- DCHECK(!function->shared()->uses_arguments());
-
- RUNTIME_ASSERT(FLAG_use_osr);
-
- // Passing the PC in the javascript frame from the caller directly is
- // not GC safe, so we walk the stack to get it.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- if (!caller_code->contains(frame->pc())) {
- // Code on the stack may not be the code object referenced by the shared
- // function info. It may have been replaced to include deoptimization data.
- caller_code = Handle<Code>(frame->LookupCode());
- }
-
- uint32_t pc_offset = static_cast<uint32_t>(
- frame->pc() - caller_code->instruction_start());
-
-#ifdef DEBUG
- DCHECK_EQ(frame->function(), *function);
- DCHECK_EQ(frame->LookupCode(), *caller_code);
- DCHECK(caller_code->contains(frame->pc()));
-#endif // DEBUG
-
-
- BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
- DCHECK(!ast_id.IsNone());
-
- Compiler::ConcurrencyMode mode =
- isolate->concurrent_osr_enabled() &&
- (function->shared()->ast_node_count() > 512) ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
- Handle<Code> result = Handle<Code>::null();
-
- OptimizedCompileJob* job = NULL;
- if (mode == Compiler::CONCURRENT) {
- // Gate the OSR entry with a stack check.
- BackEdgeTable::AddStackCheck(caller_code, pc_offset);
- // Poll already queued compilation jobs.
- OptimizingCompilerThread* thread = isolate->optimizing_compiler_thread();
- if (thread->IsQueuedForOSR(function, ast_id)) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - Still waiting for queued: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
- }
- return NULL;
- }
-
- job = thread->FindReadyOSRCandidate(function, ast_id);
- }
-
- if (job != NULL) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - Found ready: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
- }
- result = Compiler::GetConcurrentlyOptimizedCode(job);
- } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - Compiling: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
- }
- MaybeHandle<Code> maybe_result = Compiler::GetOptimizedCode(
- function, caller_code, mode, ast_id);
- if (maybe_result.ToHandle(&result) &&
- result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
- // Optimization is queued. Return to check later.
- return NULL;
- }
- }
-
- // Revert the patched back edge table, regardless of whether OSR succeeds.
- BackEdgeTable::Revert(isolate, *caller_code);
-
- // Check whether we ended up with usable optimized code.
- if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(result->deoptimization_data());
-
- if (data->OsrPcOffset()->value() >= 0) {
- DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id);
- if (FLAG_trace_osr) {
- PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
- ast_id.ToInt(), data->OsrPcOffset()->value());
- }
- // TODO(titzer): this is a massive hack to make the deopt counts
- // match. Fix heuristics for reenabling optimizations!
- function->shared()->increment_deopt_count();
-
- // TODO(titzer): Do not install code into the function.
- function->ReplaceCode(*result);
- return *result;
- }
- }
-
- // Failed.
- if (FLAG_trace_osr) {
- PrintF("[OSR - Failed: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
- }
-
- if (!function->IsOptimized()) {
- function->ReplaceCode(function->shared()->code());
- }
- return NULL;
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2 || args.length() == 3);
-#ifdef DEBUG
- CONVERT_SMI_ARG_CHECKED(interval, 0);
- CONVERT_SMI_ARG_CHECKED(timeout, 1);
- isolate->heap()->set_allocation_timeout(timeout);
- FLAG_gc_interval = interval;
- if (args.length() == 3) {
- // Enable/disable inline allocation if requested.
- CONVERT_BOOLEAN_ARG_CHECKED(inline_allocation, 2);
- if (inline_allocation) {
- isolate->heap()->EnableInlineAllocation();
- } else {
- isolate->heap()->DisableInlineAllocation();
- }
- }
-#endif
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
@@ -8883,7 +3803,7 @@ RUNTIME_FUNCTION(Runtime_Call) {
}
for (int i = 0; i < argc; ++i) {
- argv[i] = Handle<Object>(args[1 + i], isolate);
+ argv[i] = Handle<Object>(args[1 + i], isolate);
}
Handle<JSReceiver> hfun(fun);
@@ -8902,8 +3822,8 @@ RUNTIME_FUNCTION(Runtime_Apply) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
- CONVERT_SMI_ARG_CHECKED(offset, 3);
- CONVERT_SMI_ARG_CHECKED(argc, 4);
+ CONVERT_INT32_ARG_CHECKED(offset, 3);
+ CONVERT_INT32_ARG_CHECKED(argc, 4);
RUNTIME_ASSERT(offset >= 0);
// Loose upper bound to allow fuzzing. We'll most likely run out of
// stack space before hitting this limit.
@@ -8923,8 +3843,7 @@ RUNTIME_FUNCTION(Runtime_Apply) {
for (int i = 0; i < argc; ++i) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, argv[i],
- Object::GetElement(isolate, arguments, offset + i));
+ isolate, argv[i], Object::GetElement(isolate, arguments, offset + i));
}
Handle<Object> result;
@@ -8993,10 +3912,8 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
Object::ToObject(isolate, args.at<Object>(0));
if (!maybe_object.ToHandle(&extension_object)) {
Handle<Object> handle = args.at<Object>(0);
- Handle<Object> result =
- isolate->factory()->NewTypeError("with_expression",
- HandleVector(&handle, 1));
- return isolate->Throw(*result);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError("with_expression", HandleVector(&handle, 1)));
}
}
@@ -9011,8 +3928,8 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
}
Handle<Context> current(isolate->context());
- Handle<Context> context = isolate->factory()->NewWithContext(
- function, current, extension_object);
+ Handle<Context> context =
+ isolate->factory()->NewWithContext(function, current, extension_object);
isolate->set_context(*context);
return *context;
}
@@ -9054,8 +3971,8 @@ RUNTIME_FUNCTION(Runtime_PushBlockContext) {
function = args.at<JSFunction>(1);
}
Handle<Context> current(isolate->context());
- Handle<Context> context = isolate->factory()->NewBlockContext(
- function, current, scope_info);
+ Handle<Context> context =
+ isolate->factory()->NewBlockContext(function, current, scope_info);
isolate->set_context(*context);
return *context;
}
@@ -9170,11 +4087,8 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
BindingFlags binding_flags;
- Handle<Object> holder = context->Lookup(name,
- flags,
- &index,
- &attributes,
- &binding_flags);
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding_flags);
// If the slot was not found the result is true.
if (holder.is_null()) {
@@ -9191,69 +4105,13 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// (respecting DONT_DELETE).
Handle<JSObject> object = Handle<JSObject>::cast(holder);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSReceiver::DeleteProperty(object, name));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSReceiver::DeleteProperty(object, name));
return *result;
}
-// A mechanism to return a pair of Object pointers in registers (if possible).
-// How this is achieved is calling convention-dependent.
-// All currently supported x86 compiles uses calling conventions that are cdecl
-// variants where a 64-bit value is returned in two 32-bit registers
-// (edx:eax on ia32, r1:r0 on ARM).
-// In AMD-64 calling convention a struct of two pointers is returned in rdx:rax.
-// In Win64 calling convention, a struct of two pointers is returned in memory,
-// allocated by the caller, and passed as a pointer in a hidden first parameter.
-#ifdef V8_HOST_ARCH_64_BIT
-struct ObjectPair {
- Object* x;
- Object* y;
-};
-
-
-static inline ObjectPair MakePair(Object* x, Object* y) {
- ObjectPair result = {x, y};
- // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
- // In Win64 they are assigned to a hidden first argument.
- return result;
-}
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-// For x32 a 128-bit struct return is done as rax and rdx from the ObjectPair
-// are used in the full codegen and Crankshaft compiler. An alternative is
-// using uint64_t and modifying full codegen and Crankshaft compiler.
-struct ObjectPair {
- Object* x;
- uint32_t x_upper;
- Object* y;
- uint32_t y_upper;
-};
-
-
-static inline ObjectPair MakePair(Object* x, Object* y) {
- ObjectPair result = {x, 0, y, 0};
- // Pointers x and y returned in rax and rdx, in x32-abi.
- return result;
-}
-#else
-typedef uint64_t ObjectPair;
-static inline ObjectPair MakePair(Object* x, Object* y) {
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- return reinterpret_cast<uint32_t>(x) |
- (reinterpret_cast<ObjectPair>(y) << 32);
-#elif defined(V8_TARGET_BIG_ENDIAN)
- return reinterpret_cast<uint32_t>(y) |
- (reinterpret_cast<ObjectPair>(x) << 32);
-#else
-#error Unknown endianness
-#endif
-}
-#endif
-
-
-static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
- JSObject* holder) {
+static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) {
DCHECK(!holder->IsGlobalObject());
Context* top = isolate->context();
// Get the context extension function.
@@ -9287,11 +4145,8 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
BindingFlags binding_flags;
- Handle<Object> holder = context->Lookup(name,
- flags,
- &index,
- &attributes,
- &binding_flags);
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding_flags);
if (isolate->has_pending_exception()) {
return MakePair(isolate->heap()->exception(), NULL);
}
@@ -9308,12 +4163,14 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
case MUTABLE_CHECK_INITIALIZED:
case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
if (value->IsTheHole()) {
- Handle<Object> reference_error =
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error =
isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+ return MakePair(isolate->heap()->exception(), NULL);
}
- // FALLTHROUGH
+ // FALLTHROUGH
case MUTABLE_IS_INITIALIZED:
case IMMUTABLE_IS_INITIALIZED:
case IMMUTABLE_IS_INITIALIZED_HARMONY:
@@ -9348,25 +4205,26 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
object->IsGlobalObject()
? Object::cast(isolate->heap()->undefined_value())
: object->IsJSProxy() ? static_cast<Object*>(*object)
- : ComputeReceiverForNonGlobal(isolate, JSObject::cast(*object)),
+ : ComputeReceiverForNonGlobal(
+ isolate, JSObject::cast(*object)),
isolate);
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetProperty(object, name),
+ isolate, value, Object::GetProperty(object, name),
MakePair(isolate->heap()->exception(), NULL));
return MakePair(*value, *receiver_handle);
}
if (throw_error) {
// The property doesn't exist - throw exception.
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error = isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+ return MakePair(isolate->heap()->exception(), NULL);
} else {
// The property doesn't exist - return undefined.
return MakePair(isolate->heap()->undefined_value(),
@@ -9398,11 +4256,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
BindingFlags binding_flags;
- Handle<Object> holder = context->Lookup(name,
- flags,
- &index,
- &attributes,
- &binding_flags);
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding_flags);
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return isolate->heap()->exception();
@@ -9412,10 +4267,9 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
Handle<Context>::cast(holder)->set(index, *value);
} else if (strict_mode == STRICT) {
// Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_cannot_assign",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("strict_cannot_assign", HandleVector(&name, 1)));
}
return *value;
}
@@ -9429,9 +4283,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
object = Handle<JSReceiver>::cast(holder);
} else if (strict_mode == STRICT) {
// If absent in strict mode: throw.
- Handle<Object> error = isolate->factory()->NewReferenceError(
- "not_defined", HandleVector(&name, 1));
- return isolate->Throw(*error);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError("not_defined", HandleVector(&name, 1)));
} else {
// If absent in sloppy mode: add the property to the global object.
object = Handle<JSReceiver>(context->global_object());
@@ -9471,50 +4324,47 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return isolate->Throw(*reference_error);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError("not_defined", HandleVector(&name, 1)));
}
-RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
+RUNTIME_FUNCTION(Runtime_ThrowNonMethodError) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "not_date_object", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError("non_method", HandleVector<Object>(NULL, 0)));
}
-RUNTIME_FUNCTION(Runtime_StackGuard) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
+ HandleScope scope(isolate);
DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewReferenceError("unsupported_super", HandleVector<Object>(NULL, 0)));
+}
- // First check if this is a real stack overflow.
- StackLimitCheck check(isolate);
- if (check.JsHasOverflowed()) {
- return isolate->StackOverflow();
- }
- return isolate->stack_guard()->HandleInterrupts();
+RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError("not_date_object", HandleVector<Object>(NULL, 0)));
}
-RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+RUNTIME_FUNCTION(Runtime_StackGuard) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
// First check if this is a real stack overflow.
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
- SealHandleScope shs(isolate);
return isolate->StackOverflow();
}
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- return (function->IsOptimized()) ? function->code()
- : function->shared()->code();
+ return isolate->stack_guard()->HandleInterrupts();
}
@@ -9534,7 +4384,8 @@ static int StackSize(Isolate* isolate) {
static void PrintTransition(Isolate* isolate, Object* result) {
// indentation
- { const int nmax = 80;
+ {
+ const int nmax = 80;
int n = StackSize(isolate);
if (n <= nmax)
PrintF("%4d:%*s", n, n, "");
@@ -9571,45 +4422,6 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
}
-RUNTIME_FUNCTION(Runtime_DebugPrint) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
-
- OFStream os(stdout);
-#ifdef DEBUG
- if (args[0]->IsString()) {
- // If we have a string, assume it's a code "marker"
- // and print some interesting cpu debugging info.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- os << "fp = " << frame->fp() << ", sp = " << frame->sp()
- << ", caller_sp = " << frame->caller_sp() << ": ";
- } else {
- os << "DebugPrint: ";
- }
- args[0]->Print(os);
- if (args[0]->IsHeapObject()) {
- os << "\n";
- HeapObject::cast(args[0])->map()->Print(os);
- }
-#else
- // ShortPrint is available in release mode. Print is not.
- os << Brief(args[0]);
-#endif
- os << endl;
-
- return args[0]; // return TOS
-}
-
-
-RUNTIME_FUNCTION(Runtime_DebugTrace) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- isolate->PrintStack(stdout);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
@@ -9647,14 +4459,12 @@ RUNTIME_FUNCTION(Runtime_DateParseString) {
bool result;
String::FlatContent str_content = str->GetFlatContent();
- if (str_content.IsAscii()) {
- result = DateParser::Parse(str_content.ToOneByteVector(),
- *output_array,
+ if (str_content.IsOneByte()) {
+ result = DateParser::Parse(str_content.ToOneByteVector(), *output_array,
isolate->unicode_cache());
} else {
DCHECK(str_content.IsTwoByte());
- result = DateParser::Parse(str_content.ToUC16Vector(),
- *output_array,
+ result = DateParser::Parse(str_content.ToUC16Vector(), *output_array,
isolate->unicode_cache());
}
@@ -9675,8 +4485,8 @@ RUNTIME_FUNCTION(Runtime_DateLocalTimezone) {
x <= DateCache::kMaxTimeBeforeUTCInMs);
const char* zone =
isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x));
- Handle<String> result = isolate->factory()->NewStringFromUtf8(
- CStrVector(zone)).ToHandleChecked();
+ Handle<String> result =
+ isolate->factory()->NewStringFromUtf8(CStrVector(zone)).ToHandleChecked();
return *result;
}
@@ -9734,188 +4544,6 @@ RUNTIME_FUNCTION(Runtime_IsAttachedGlobal) {
}
-RUNTIME_FUNCTION(Runtime_ParseJson) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
-
- source = String::Flatten(source);
- // Optimized fast case where we only have ASCII characters.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- source->IsSeqOneByteString() ? JsonParser<true>::Parse(source)
- : JsonParser<false>::Parse(source));
- return *result;
-}
-
-
-bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context) {
- DCHECK(context->allow_code_gen_from_strings()->IsFalse());
- // Check with callback if set.
- AllowCodeGenerationFromStringsCallback callback =
- isolate->allow_code_gen_callback();
- if (callback == NULL) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState<EXTERNAL> state(isolate);
- return callback(v8::Utils::ToLocal(context));
- }
-}
-
-
-// Walk up the stack expecting:
-// - Runtime_CompileString
-// - JSFunction callee (eval, Function constructor, etc)
-// - call() (maybe)
-// - apply() (maybe)
-// - bind() (maybe)
-// - JSFunction caller (maybe)
-//
-// return true if the caller has the same security token as the callee
-// or if an exit frame was hit, in which case allow it through, as it could
-// have come through the api.
-static bool TokensMatchForCompileString(Isolate* isolate) {
- MaybeHandle<JSFunction> callee;
- bool exit_handled = true;
- bool tokens_match = true;
- bool done = false;
- for (StackFrameIterator it(isolate); !it.done() && !done; it.Advance()) {
- StackFrame* raw_frame = it.frame();
- if (!raw_frame->is_java_script()) {
- if (raw_frame->is_exit()) exit_handled = false;
- continue;
- }
- JavaScriptFrame* outer_frame = JavaScriptFrame::cast(raw_frame);
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- outer_frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && !done; --i) {
- FrameSummary& frame = frames[i];
- Handle<JSFunction> fun = frame.function();
- // Capture the callee function.
- if (callee.is_null()) {
- callee = fun;
- exit_handled = true;
- continue;
- }
- // Exit condition.
- Handle<Context> context(callee.ToHandleChecked()->context());
- if (!fun->context()->HasSameSecurityTokenAs(*context)) {
- tokens_match = false;
- done = true;
- continue;
- }
- // Skip bound functions in correct origin.
- if (fun->shared()->bound()) {
- exit_handled = true;
- continue;
- }
- done = true;
- }
- }
- return !exit_handled || tokens_match;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CompileString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
-
- // Extract native context.
- Handle<Context> context(isolate->native_context());
-
- // Filter cross security context calls.
- if (!TokensMatchForCompileString(isolate)) {
- return isolate->heap()->undefined_value();
- }
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, context)) {
- Handle<Object> error_message =
- context->ErrorMessageForCodeGenerationFromStrings();
- return isolate->Throw(*isolate->factory()->NewEvalError(
- "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
- }
-
- // Compile source string in the native context.
- ParseRestriction restriction = function_literal_only
- ? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
- Handle<JSFunction> fun;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, fun,
- Compiler::GetFunctionFromEval(
- source, context, SLOPPY, restriction, RelocInfo::kNoPosition));
- return *fun;
-}
-
-
-static ObjectPair CompileGlobalEval(Isolate* isolate,
- Handle<String> source,
- Handle<Object> receiver,
- StrictMode strict_mode,
- int scope_position) {
- Handle<Context> context = Handle<Context>(isolate->context());
- Handle<Context> native_context = Handle<Context>(context->native_context());
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, native_context)) {
- Handle<Object> error_message =
- native_context->ErrorMessageForCodeGenerationFromStrings();
- isolate->Throw(*isolate->factory()->NewEvalError(
- "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
- return MakePair(isolate->heap()->exception(), NULL);
- }
-
- // Deal with a normal eval call with a string argument. Compile it
- // and return the compiled function bound in the local context.
- static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
- Handle<JSFunction> compiled;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, compiled,
- Compiler::GetFunctionFromEval(
- source, context, strict_mode, restriction, scope_position),
- MakePair(isolate->heap()->exception(), NULL));
- return MakePair(*compiled, *receiver);
-}
-
-
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
-
- Handle<Object> callee = args.at<Object>(0);
-
- // If "eval" didn't refer to the original GlobalEval, it's not a
- // direct call to eval.
- // (And even if it is, but the first argument isn't a string, just let
- // execution default to an indirect call to eval, which will also return
- // the first argument without doing anything).
- if (*callee != isolate->native_context()->global_eval_fun() ||
- !args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->undefined_value());
- }
-
- DCHECK(args[3]->IsSmi());
- DCHECK(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
- StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
- DCHECK(args[4]->IsSmi());
- return CompileGlobalEval(isolate,
- args.at<String>(1),
- args.at<Object>(2),
- strict_mode,
- args.smi_at(4));
-}
-
-
RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -9958,8 +4586,7 @@ RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
// Strict not needed. Used for cycle detection in Array join implementation.
RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetFastElement(array, length, element, SLOPPY, true));
+ isolate, JSObject::SetFastElement(array, length, element, SLOPPY, true));
return isolate->heap()->true_value();
}
@@ -9977,19 +4604,16 @@ RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
*/
class ArrayConcatVisitor {
public:
- ArrayConcatVisitor(Isolate* isolate,
- Handle<FixedArray> storage,
- bool fast_elements) :
- isolate_(isolate),
- storage_(Handle<FixedArray>::cast(
- isolate->global_handles()->Create(*storage))),
- index_offset_(0u),
- fast_elements_(fast_elements),
- exceeds_array_limit_(false) { }
-
- ~ArrayConcatVisitor() {
- clear_storage();
- }
+ ArrayConcatVisitor(Isolate* isolate, Handle<FixedArray> storage,
+ bool fast_elements)
+ : isolate_(isolate),
+ storage_(Handle<FixedArray>::cast(
+ isolate->global_handles()->Create(*storage))),
+ index_offset_(0u),
+ fast_elements_(fast_elements),
+ exceeds_array_limit_(false) {}
+
+ ~ArrayConcatVisitor() { clear_storage(); }
void visit(uint32_t i, Handle<Object> elm) {
if (i > JSObject::kMaxElementCount - index_offset_) {
@@ -10038,17 +4662,14 @@ class ArrayConcatVisitor {
}
}
- bool exceeds_array_limit() {
- return exceeds_array_limit_;
- }
+ bool exceeds_array_limit() { return exceeds_array_limit_; }
Handle<JSArray> ToArray() {
Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map = JSObject::GetElementsTransitionMap(
- array,
- fast_elements_ ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
+ array, fast_elements_ ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_map(*map);
array->set_length(*length);
array->set_elements(*storage_);
@@ -10084,8 +4705,8 @@ class ArrayConcatVisitor {
}
inline void set_storage(FixedArray* storage) {
- storage_ = Handle<FixedArray>::cast(
- isolate_->global_handles()->Create(storage));
+ storage_ =
+ Handle<FixedArray>::cast(isolate_->global_handles()->Create(storage));
}
Isolate* isolate_;
@@ -10146,11 +4767,11 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case SLOPPY_ARGUMENTS_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// External arrays are always dense.
return length;
@@ -10161,8 +4782,7 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
-
-template<class ExternalArrayClass, class ElementType>
+template <class ExternalArrayClass, class ElementType>
static void IterateExternalArrayElements(Isolate* isolate,
Handle<JSObject> receiver,
bool elements_are_ints,
@@ -10213,8 +4833,7 @@ static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
}
-static void CollectElementIndices(Handle<JSObject> object,
- uint32_t range,
+static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
List<uint32_t>* indices) {
Isolate* isolate = object->GetIsolate();
ElementsKind kind = object->GetElementsKind();
@@ -10235,8 +4854,19 @@ static void CollectElementIndices(Handle<JSObject> object,
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ if (object->elements()->IsFixedArray()) {
+ DCHECK(object->elements()->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()));
+ uint32_t length = static_cast<uint32_t>(elements->length());
+ if (range < length) length = range;
+ for (uint32_t i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) {
+ indices->Add(i);
+ }
+ }
break;
}
case DICTIONARY_ELEMENTS: {
@@ -10256,35 +4886,38 @@ static void CollectElementIndices(Handle<JSObject> object,
}
break;
}
- default: {
- int dense_elements_length;
- switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: { \
- dense_elements_length = \
- External##Type##Array::cast(object->elements())->length(); \
- break; \
- }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ case EXTERNAL_##TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
-
- default:
- UNREACHABLE();
- dense_elements_length = 0;
- break;
- }
- uint32_t length = static_cast<uint32_t>(dense_elements_length);
- if (range <= length) {
- length = range;
- // We will add all indices, so we might as well clear it first
- // and avoid duplicates.
- indices->Clear();
+ {
+ uint32_t length = static_cast<uint32_t>(
+ FixedArrayBase::cast(object->elements())->length());
+ if (range <= length) {
+ length = range;
+ // We will add all indices, so we might as well clear it first
+ // and avoid duplicates.
+ indices->Clear();
+ }
+ for (uint32_t i = 0; i < length; i++) {
+ indices->Add(i);
+ }
+ if (length == range) return; // All indices accounted for already.
+ break;
}
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
+ MaybeHandle<Object> length_obj =
+ Object::GetProperty(object, isolate->factory()->length_string());
+ double length_num = length_obj.ToHandleChecked()->Number();
+ uint32_t length = static_cast<uint32_t>(DoubleToInt32(length_num));
+ ElementsAccessor* accessor = object->GetElementsAccessor();
for (uint32_t i = 0; i < length; i++) {
- indices->Add(i);
+ if (accessor->HasElement(object, object, i)) {
+ indices->Add(i);
+ }
}
- if (length == range) return; // All indices accounted for already.
break;
}
}
@@ -10310,8 +4943,7 @@ static void CollectElementIndices(Handle<JSObject> object,
* length.
* Returns false if any access threw an exception, otherwise true.
*/
-static bool IterateElements(Isolate* isolate,
- Handle<JSArray> receiver,
+static bool IterateElements(Isolate* isolate, Handle<JSArray> receiver,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
@@ -10395,8 +5027,7 @@ static bool IterateElements(Isolate* isolate,
uint32_t index = indices[j];
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element,
- Object::GetElement(isolate, receiver, index),
+ isolate, element, Object::GetElement(isolate, receiver, index),
false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
@@ -10407,8 +5038,8 @@ static bool IterateElements(Isolate* isolate,
break;
}
case EXTERNAL_UINT8_CLAMPED_ELEMENTS: {
- Handle<ExternalUint8ClampedArray> pixels(ExternalUint8ClampedArray::cast(
- receiver->elements()));
+ Handle<ExternalUint8ClampedArray> pixels(
+ ExternalUint8ClampedArray::cast(receiver->elements()));
for (uint32_t j = 0; j < length; j++) {
Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
visitor->visit(j, e);
@@ -10518,14 +5149,12 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
element_estimate = 1;
}
// Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
+ if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
estimate_result_length = JSObject::kMaxElementCount;
} else {
estimate_result_length += length_estimate;
}
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
+ if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
estimate_nof_elements = JSObject::kMaxElementCount;
} else {
estimate_nof_elements += element_estimate;
@@ -10541,10 +5170,10 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
Handle<FixedArrayBase> storage =
isolate->factory()->NewFixedDoubleArray(estimate_result_length);
int j = 0;
+ bool failure = false;
if (estimate_result_length > 0) {
Handle<FixedDoubleArray> double_storage =
Handle<FixedDoubleArray>::cast(storage);
- bool failure = false;
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj(elements->get(i), isolate);
if (obj->IsSmi()) {
@@ -10565,6 +5194,11 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
FixedDoubleArray::cast(array->elements());
for (uint32_t i = 0; i < length; i++) {
if (elements->is_the_hole(i)) {
+ // TODO(jkummerow/verwaest): We could be a bit more clever
+ // here: Check if there are no elements/getters on the
+ // prototype chain, and if so, allow creation of a holey
+ // result array.
+ // Same thing below (holey smi case).
failure = true;
break;
}
@@ -10576,8 +5210,7 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
}
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
- FixedArray* elements(
- FixedArray::cast(array->elements()));
+ FixedArray* elements(FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
Object* element = elements->get(i);
if (element->IsTheHole()) {
@@ -10591,6 +5224,7 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
break;
}
case FAST_HOLEY_ELEMENTS:
+ case FAST_ELEMENTS:
DCHECK_EQ(0, length);
break;
default:
@@ -10600,26 +5234,29 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
if (failure) break;
}
}
- Handle<JSArray> array = isolate->factory()->NewJSArray(0);
- Smi* length = Smi::FromInt(j);
- Handle<Map> map;
- map = JSObject::GetElementsTransitionMap(array, kind);
- array->set_map(*map);
- array->set_length(length);
- array->set_elements(*storage);
- return *array;
+ if (!failure) {
+ Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+ Smi* length = Smi::FromInt(j);
+ Handle<Map> map;
+ map = JSObject::GetElementsTransitionMap(array, kind);
+ array->set_map(*map);
+ array->set_length(length);
+ array->set_elements(*storage);
+ return *array;
+ }
+ // In case of failure, fall through.
}
Handle<FixedArray> storage;
if (fast_case) {
// The backing storage array must have non-existing elements to preserve
// holes across concat operations.
- storage = isolate->factory()->NewFixedArrayWithHoles(
- estimate_result_length);
+ storage =
+ isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
} else {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for = estimate_nof_elements +
- (estimate_nof_elements >> 2);
+ uint32_t at_least_space_for =
+ estimate_nof_elements + (estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
SeededNumberDictionary::New(isolate, at_least_space_for));
}
@@ -10640,31 +5277,14 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
}
if (visitor.exceeds_array_limit()) {
- return isolate->Throw(
- *isolate->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError("invalid_array_length", HandleVector<Object>(NULL, 0)));
}
return *visitor.ToArray();
}
-// This will not allocate (flatten the string), but it may run
-// very slowly for very deeply nested ConsStrings. For debugging use only.
-RUNTIME_FUNCTION(Runtime_GlobalPrint) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_CHECKED(String, string, 0);
- ConsStringIteratorOp op;
- StringCharacterStream stream(string, &op);
- while (stream.HasMore()) {
- uint16_t character = stream.GetNext();
- PrintF("%c", character);
- }
- return string;
-}
-
-
// Moves all own elements of an object, that are below a limit, to positions
// starting at zero. All undefined values are placed after non-undefined values,
// and are followed by non-existing element. Does not change the length
@@ -10831,8 +5451,7 @@ static StackFrame::Id UnwrapFrameId(int wrapped) {
RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsJSFunction() ||
- args[0]->IsUndefined() ||
+ RUNTIME_ASSERT(args[0]->IsJSFunction() || args[0]->IsUndefined() ||
args[0]->IsNull());
CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
@@ -10850,61 +5469,52 @@ RUNTIME_FUNCTION(Runtime_Break) {
}
-static Handle<Object> DebugLookupResultValue(Isolate* isolate,
- Handle<Object> receiver,
- Handle<Name> name,
- LookupResult* result,
- bool* has_caught = NULL) {
- Handle<Object> value = isolate->factory()->undefined_value();
- if (!result->IsFound()) return value;
- switch (result->type()) {
- case NORMAL:
- return JSObject::GetNormalizedProperty(handle(result->holder(), isolate),
- result);
- case FIELD:
- return JSObject::FastPropertyAt(handle(result->holder(), isolate),
- result->representation(),
- result->GetFieldIndex());
- case CONSTANT:
- return handle(result->GetConstant(), isolate);
- case CALLBACKS: {
- Handle<Object> structure(result->GetCallbackObject(), isolate);
- DCHECK(!structure->IsForeign());
- if (structure->IsAccessorInfo()) {
- MaybeHandle<Object> obj = JSObject::GetPropertyWithAccessor(
- receiver, name, handle(result->holder(), isolate), structure);
- if (!obj.ToHandle(&value)) {
- value = handle(isolate->pending_exception(), isolate);
- isolate->clear_pending_exception();
+static Handle<Object> DebugGetProperty(LookupIterator* it,
+ bool* has_caught = NULL) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::ACCESS_CHECK:
+ // Ignore access checks.
+ break;
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> accessors = it->GetAccessors();
+ if (!accessors->IsAccessorInfo()) {
+ return it->isolate()->factory()->undefined_value();
+ }
+ MaybeHandle<Object> maybe_result = JSObject::GetPropertyWithAccessor(
+ it->GetReceiver(), it->name(), it->GetHolder<JSObject>(),
+ accessors);
+ Handle<Object> result;
+ if (!maybe_result.ToHandle(&result)) {
+ result = handle(it->isolate()->pending_exception(), it->isolate());
+ it->isolate()->clear_pending_exception();
if (has_caught != NULL) *has_caught = true;
- return value;
}
+ return result;
}
- break;
+
+ case LookupIterator::DATA:
+ return it->GetDataValue();
}
- case INTERCEPTOR:
- case HANDLER:
- break;
- case NONEXISTENT:
- UNREACHABLE();
- break;
}
- return value;
+
+ return it->isolate()->factory()->undefined_value();
}
-// Get debugger related details for an object property.
-// args[0]: object holding property
-// args[1]: name of the property
-//
-// The array returned contains the following information:
+// Get debugger related details for an object property, in the following format:
// 0: Property value
// 1: Property details
// 2: Property value is exception
// 3: Getter function if defined
// 4: Setter function if defined
-// Items 2-4 are only filled if the property has either a getter or a setter
-// defined through __defineGetter__ and/or __defineSetter__.
+// Items 2-4 are only filled if the property has either a getter or a setter.
RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
HandleScope scope(isolate);
@@ -10934,58 +5544,42 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
isolate, element_or_char,
Runtime::GetElementOrCharAt(isolate, obj, index));
details->set(0, *element_or_char);
- details->set(
- 1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
+ details->set(1,
+ PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
- // Find the number of objects making up this.
- int length = OwnPrototypeChainLength(*obj);
+ LookupIterator it(obj, name, LookupIterator::HIDDEN);
+ bool has_caught = false;
+ Handle<Object> value = DebugGetProperty(&it, &has_caught);
+ if (!it.IsFound()) return isolate->heap()->undefined_value();
- // Try own lookup on each of the objects.
- PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
- for (int i = 0; i < length; i++) {
- DCHECK(!iter.IsAtEnd());
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- LookupResult result(isolate);
- jsproto->LookupOwn(name, &result);
- if (result.IsFound()) {
- // LookupResult is not GC safe as it holds raw object pointers.
- // GC can happen later in this code so put the required fields into
- // local variables using handles when required for later use.
- Handle<Object> result_callback_obj;
- if (result.IsPropertyCallbacks()) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject(),
- isolate);
- }
-
-
- bool has_caught = false;
- Handle<Object> value = DebugLookupResultValue(
- isolate, obj, name, &result, &has_caught);
-
- // If the callback object is a fixed array then it contains JavaScript
- // getter and/or setter.
- bool has_js_accessors = result.IsPropertyCallbacks() &&
- result_callback_obj->IsAccessorPair();
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(has_js_accessors ? 5 : 2);
- details->set(0, *value);
- details->set(1, result.GetPropertyDetails().AsSmi());
- if (has_js_accessors) {
- AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
- details->set(2, isolate->heap()->ToBoolean(has_caught));
- details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
- details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
- }
+ Handle<Object> maybe_pair;
+ if (it.state() == LookupIterator::ACCESSOR) {
+ maybe_pair = it.GetAccessors();
+ }
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
- iter.Advance();
+ // If the callback object is a fixed array then it contains JavaScript
+ // getter and/or setter.
+ bool has_js_accessors = !maybe_pair.is_null() && maybe_pair->IsAccessorPair();
+ Handle<FixedArray> details =
+ isolate->factory()->NewFixedArray(has_js_accessors ? 6 : 3);
+ details->set(0, *value);
+ // TODO(verwaest): Get rid of this random way of handling interceptors.
+ PropertyDetails d = it.state() == LookupIterator::INTERCEPTOR
+ ? PropertyDetails(NONE, NORMAL, 0)
+ : it.property_details();
+ details->set(1, d.AsSmi());
+ details->set(
+ 2, isolate->heap()->ToBoolean(it.state() == LookupIterator::INTERCEPTOR));
+ if (has_js_accessors) {
+ AccessorPair* accessors = AccessorPair::cast(*maybe_pair);
+ details->set(3, isolate->heap()->ToBoolean(has_caught));
+ details->set(4, accessors->GetComponent(ACCESSOR_GETTER));
+ details->set(5, accessors->GetComponent(ACCESSOR_SETTER));
}
- return isolate->heap()->undefined_value();
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -10997,9 +5591,8 @@ RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- LookupResult result(isolate);
- obj->Lookup(name, &result);
- return *DebugLookupResultValue(isolate, obj, name, &result);
+ LookupIterator it(obj, name);
+ return *DebugGetProperty(&it);
}
@@ -11045,8 +5638,8 @@ RUNTIME_FUNCTION(Runtime_DebugNamedInterceptorPropertyValue) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::GetProperty(obj, name));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::GetProperty(obj, name));
return *result;
}
@@ -11111,8 +5704,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
class FrameInspector {
public:
- FrameInspector(JavaScriptFrame* frame,
- int inlined_jsframe_index,
+ FrameInspector(JavaScriptFrame* frame, int inlined_jsframe_index,
Isolate* isolate)
: frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
// Calculate the deoptimized frame.
@@ -11128,41 +5720,38 @@ class FrameInspector {
~FrameInspector() {
// Get rid of the calculated deoptimized frame if any.
if (deoptimized_frame_ != NULL) {
- Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_,
- isolate_);
+ Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_, isolate_);
}
}
int GetParametersCount() {
- return is_optimized_
- ? deoptimized_frame_->parameters_count()
- : frame_->ComputeParametersCount();
+ return is_optimized_ ? deoptimized_frame_->parameters_count()
+ : frame_->ComputeParametersCount();
}
int expression_count() { return deoptimized_frame_->expression_count(); }
Object* GetFunction() {
- return is_optimized_
- ? deoptimized_frame_->GetFunction()
- : frame_->function();
+ return is_optimized_ ? deoptimized_frame_->GetFunction()
+ : frame_->function();
}
Object* GetParameter(int index) {
- return is_optimized_
- ? deoptimized_frame_->GetParameter(index)
- : frame_->GetParameter(index);
+ return is_optimized_ ? deoptimized_frame_->GetParameter(index)
+ : frame_->GetParameter(index);
}
Object* GetExpression(int index) {
- return is_optimized_
- ? deoptimized_frame_->GetExpression(index)
- : frame_->GetExpression(index);
+ return is_optimized_ ? deoptimized_frame_->GetExpression(index)
+ : frame_->GetExpression(index);
}
int GetSourcePosition() {
- return is_optimized_
- ? deoptimized_frame_->GetSourcePosition()
- : frame_->LookupCode()->SourcePosition(frame_->pc());
+ return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
+ : frame_->LookupCode()->SourcePosition(frame_->pc());
}
bool IsConstructor() {
return is_optimized_ && !is_bottommost_
- ? deoptimized_frame_->HasConstructStub()
- : frame_->IsConstructor();
+ ? deoptimized_frame_->HasConstructStub()
+ : frame_->IsConstructor();
+ }
+ Object* GetContext() {
+ return is_optimized_ ? deoptimized_frame_->GetContext() : frame_->context();
}
// To inspect all the provided arguments the frame might need to be
@@ -11209,15 +5798,6 @@ static SaveContext* FindSavedContextForFrame(Isolate* isolate,
}
-RUNTIME_FUNCTION(Runtime_IsOptimized) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- return isolate->heap()->ToBoolean(frame->is_optimized());
-}
-
-
// Advances the iterator to the frame that matches the index and returns the
// inlined frame index, or -1 if not found. Skips native JS functions.
static int FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index) {
@@ -11300,8 +5880,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
for (int slot = 0; slot < scope_info->LocalCount(); ++slot) {
// Hide compiler-introduced temporary variables, whether on the stack or on
// the context.
- if (scope_info->LocalIsSynthetic(slot))
- local_count--;
+ if (scope_info->LocalIsSynthetic(slot)) local_count--;
}
Handle<FixedArray> locals =
@@ -11312,8 +5891,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
int i = 0;
for (; i < scope_info->StackLocalCount(); ++i) {
// Use the value from the stack.
- if (scope_info->LocalIsSynthetic(i))
- continue;
+ if (scope_info->LocalIsSynthetic(i)) continue;
locals->set(local * 2, scope_info->LocalName(i));
locals->set(local * 2 + 1, frame_inspector.GetExpression(i));
local++;
@@ -11321,10 +5899,9 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
if (local < local_count) {
// Get the context containing declarations.
Handle<Context> context(
- Context::cast(it.frame()->context())->declaration_context());
+ Context::cast(frame_inspector.GetContext())->declaration_context());
for (; i < scope_info->LocalCount(); ++i) {
- if (scope_info->LocalIsSynthetic(i))
- continue;
+ if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->LocalName(i));
VariableMode mode;
InitializationFlag init_flag;
@@ -11363,8 +5940,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// entering the debug break exit frame.
if (internal_frame_sp != NULL) {
return_value =
- Handle<Object>(Memory::Object_at(internal_frame_sp),
- isolate);
+ Handle<Object>(Memory::Object_at(internal_frame_sp), isolate);
break;
}
}
@@ -11395,8 +5971,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + local_count) +
- (at_return ? 1 : 0);
+ 2 * (argument_count + local_count) + (at_return ? 1 : 0);
Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
// Add the frame id.
@@ -11409,8 +5984,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
// Add the locals count
- details->set(kFrameDetailsLocalCountIndex,
- Smi::FromInt(local_count));
+ details->set(kFrameDetailsLocalCountIndex, Smi::FromInt(local_count));
// Add the source position.
if (position != RelocInfo::kNoPosition) {
@@ -11474,8 +6048,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject() &&
- shared->strict_mode() == SLOPPY &&
+ if (!receiver->IsJSObject() && shared->strict_mode() == SLOPPY &&
!function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
@@ -11487,11 +6060,13 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
if (receiver->IsUndefined()) {
receiver = handle(function->global_proxy());
} else {
- DCHECK(!receiver->IsNull());
Context* context = Context::cast(it.frame()->context());
Handle<Context> native_context(Context::cast(context->native_context()));
- receiver = Object::ToObject(
- isolate, receiver, native_context).ToHandleChecked();
+ if (!Object::ToObject(isolate, receiver, native_context)
+ .ToHandle(&receiver)) {
+ // This only happens if the receiver is forcibly set in %_CallFunction.
+ return heap->undefined_value();
+ }
}
}
details->set(kFrameDetailsReceiverIndex, *receiver);
@@ -11515,9 +6090,7 @@ static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
// frame.
MUST_USE_RESULT
static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
- Isolate* isolate,
- Handle<JSObject> target,
- Handle<JSFunction> function,
+ Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function,
FrameInspector* frame_inspector) {
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -11535,10 +6108,9 @@ static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
isolate);
DCHECK(!value->IsTheHole());
- RETURN_ON_EXCEPTION(
- isolate,
- Runtime::SetObjectProperty(isolate, target, name, value, SLOPPY),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate, Runtime::SetObjectProperty(
+ isolate, target, name, value, SLOPPY),
+ JSObject);
}
// Second fill all stack locals.
@@ -11548,10 +6120,9 @@ static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
Handle<Object> value(frame_inspector->GetExpression(i), isolate);
if (value->IsTheHole()) continue;
- RETURN_ON_EXCEPTION(
- isolate,
- Runtime::SetObjectProperty(isolate, target, name, value, SLOPPY),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate, Runtime::SetObjectProperty(
+ isolate, target, name, value, SLOPPY),
+ JSObject);
}
return target;
@@ -11592,17 +6163,15 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
if (frame->GetExpression(i)->IsTheHole()) continue;
HandleScope scope(isolate);
Handle<Object> value = Object::GetPropertyOrElement(
- target,
- handle(scope_info->StackLocalName(i), isolate)).ToHandleChecked();
+ target, handle(scope_info->StackLocalName(i),
+ isolate)).ToHandleChecked();
frame->SetExpression(i, *value);
}
}
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
- Isolate* isolate,
- Handle<JSObject> target,
- Handle<JSFunction> function,
+ Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function,
JavaScriptFrame* frame) {
HandleScope scope(isolate);
Handle<SharedFunctionInfo> shared(function->shared());
@@ -11613,8 +6182,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (!ScopeInfo::CopyContextLocalsToScopeObject(
- scope_info, function_context, target)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, function_context,
+ target)) {
return MaybeHandle<JSObject>();
}
@@ -11626,8 +6195,7 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
+ isolate, keys, JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
JSObject);
for (int i = 0; i < keys->length(); i++) {
@@ -11637,10 +6205,9 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
- RETURN_ON_EXCEPTION(
- isolate,
- Runtime::SetObjectProperty(isolate, target, key, value, SLOPPY),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate, Runtime::SetObjectProperty(
+ isolate, target, key, value, SLOPPY),
+ JSObject);
}
}
}
@@ -11650,9 +6217,7 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
- Isolate* isolate,
- JavaScriptFrame* frame,
- int inlined_jsframe_index) {
+ Isolate* isolate, JavaScriptFrame* frame, int inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
@@ -11660,8 +6225,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
isolate->factory()->NewJSObject(isolate->object_function());
ASSIGN_RETURN_ON_EXCEPTION(
isolate, local_scope,
- MaterializeStackLocalsWithFrameInspector(
- isolate, local_scope, function, &frame_inspector),
+ MaterializeStackLocalsWithFrameInspector(isolate, local_scope, function,
+ &frame_inspector),
JSObject);
return MaterializeLocalContext(isolate, local_scope, function, frame);
@@ -11669,8 +6234,7 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
// Set the context local variable value.
-static bool SetContextLocalValue(Isolate* isolate,
- Handle<ScopeInfo> scope_info,
+static bool SetContextLocalValue(Isolate* isolate, Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<String> variable_name,
Handle<Object> new_value) {
@@ -11691,8 +6255,7 @@ static bool SetContextLocalValue(Isolate* isolate,
}
-static bool SetLocalVariableValue(Isolate* isolate,
- JavaScriptFrame* frame,
+static bool SetLocalVariableValue(Isolate* isolate, JavaScriptFrame* frame,
int inlined_jsframe_index,
Handle<String> variable_name,
Handle<Object> new_value) {
@@ -11730,8 +6293,8 @@ static bool SetLocalVariableValue(Isolate* isolate,
// Context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (SetContextLocalValue(
- isolate, scope_info, function_context, variable_name, new_value)) {
+ if (SetContextLocalValue(isolate, scope_info, function_context,
+ variable_name, new_value)) {
return true;
}
@@ -11761,8 +6324,7 @@ static bool SetLocalVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the closure content for the
// context.
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
- Isolate* isolate,
- Handle<Context> context) {
+ Isolate* isolate, Handle<Context> context) {
DCHECK(context->IsFunctionContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
@@ -11774,8 +6336,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!ScopeInfo::CopyContextLocalsToScopeObject(
- scope_info, context, closure_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
+ closure_scope)) {
return MaybeHandle<JSObject>();
}
@@ -11785,8 +6347,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
Handle<JSObject> ext(JSObject::cast(context->extension()));
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS), JSObject);
+ isolate, keys, JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
+ JSObject);
for (int i = 0; i < keys->length(); i++) {
HandleScope scope(isolate);
@@ -11796,10 +6358,9 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
- RETURN_ON_EXCEPTION(
- isolate,
- Runtime::DefineObjectProperty(closure_scope, key, value, NONE),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate, Runtime::DefineObjectProperty(
+ closure_scope, key, value, NONE),
+ JSObject);
}
}
@@ -11808,8 +6369,7 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
// This method copies structure of MaterializeClosure method above.
-static bool SetClosureVariableValue(Isolate* isolate,
- Handle<Context> context,
+static bool SetClosureVariableValue(Isolate* isolate, Handle<Context> context,
Handle<String> variable_name,
Handle<Object> new_value) {
DCHECK(context->IsFunctionContext());
@@ -11818,8 +6378,8 @@ static bool SetClosureVariableValue(Isolate* isolate,
Handle<ScopeInfo> scope_info(shared->scope_info());
// Context locals to the context extension.
- if (SetContextLocalValue(
- isolate, scope_info, context, variable_name, new_value)) {
+ if (SetContextLocalValue(isolate, scope_info, context, variable_name,
+ new_value)) {
return true;
}
@@ -11831,8 +6391,8 @@ static bool SetClosureVariableValue(Isolate* isolate,
DCHECK(maybe.has_value);
if (maybe.value) {
// We don't expect this to do anything except replacing property value.
- Runtime::DefineObjectProperty(
- ext, variable_name, new_value, NONE).Assert();
+ Runtime::DefineObjectProperty(ext, variable_name, new_value, NONE)
+ .Assert();
return true;
}
}
@@ -11844,24 +6404,21 @@ static bool SetClosureVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the scope for the specified
// catch context.
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeCatchScope(
- Isolate* isolate,
- Handle<Context> context) {
+ Isolate* isolate, Handle<Context> context) {
DCHECK(context->IsCatchContext());
Handle<String> name(String::cast(context->extension()));
Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
isolate);
Handle<JSObject> catch_scope =
isolate->factory()->NewJSObject(isolate->object_function());
- RETURN_ON_EXCEPTION(
- isolate,
- Runtime::DefineObjectProperty(catch_scope, name, thrown_object, NONE),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate, Runtime::DefineObjectProperty(
+ catch_scope, name, thrown_object, NONE),
+ JSObject);
return catch_scope;
}
-static bool SetCatchVariableValue(Isolate* isolate,
- Handle<Context> context,
+static bool SetCatchVariableValue(Isolate* isolate, Handle<Context> context,
Handle<String> variable_name,
Handle<Object> new_value) {
DCHECK(context->IsCatchContext());
@@ -11877,8 +6434,7 @@ static bool SetCatchVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the block scope for the specified
// block context.
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope(
- Isolate* isolate,
- Handle<Context> context) {
+ Isolate* isolate, Handle<Context> context) {
DCHECK(context->IsBlockContext());
Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
@@ -11888,8 +6444,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!ScopeInfo::CopyContextLocalsToScopeObject(
- scope_info, context, block_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
+ block_scope)) {
return MaybeHandle<JSObject>();
}
@@ -11900,8 +6456,7 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope(
// Create a plain JSObject which materializes the module scope for the specified
// module context.
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope(
- Isolate* isolate,
- Handle<Context> context) {
+ Isolate* isolate, Handle<Context> context) {
DCHECK(context->IsModuleContext());
Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
@@ -11911,8 +6466,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!ScopeInfo::CopyContextLocalsToScopeObject(
- scope_info, context, module_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
+ module_scope)) {
return MaybeHandle<JSObject>();
}
@@ -11936,18 +6491,15 @@ class ScopeIterator {
ScopeTypeModule
};
- ScopeIterator(Isolate* isolate,
- JavaScriptFrame* frame,
- int inlined_jsframe_index,
- bool ignore_nested_scopes = false)
- : isolate_(isolate),
- frame_(frame),
- inlined_jsframe_index_(inlined_jsframe_index),
- function_(frame->function()),
- context_(Context::cast(frame->context())),
- nested_scope_chain_(4),
- failed_(false) {
-
+ ScopeIterator(Isolate* isolate, JavaScriptFrame* frame,
+ int inlined_jsframe_index, bool ignore_nested_scopes = false)
+ : isolate_(isolate),
+ frame_(frame),
+ inlined_jsframe_index_(inlined_jsframe_index),
+ function_(frame->function()),
+ context_(Context::cast(frame->context())),
+ nested_scope_chain_(4),
+ failed_(false) {
// Catch the case when the debugger stops in an internal function.
Handle<SharedFunctionInfo> shared_info(function_->shared());
Handle<ScopeInfo> scope_info(shared_info->scope_info());
@@ -12030,14 +6582,13 @@ class ScopeIterator {
}
}
- ScopeIterator(Isolate* isolate,
- Handle<JSFunction> function)
- : isolate_(isolate),
- frame_(NULL),
- inlined_jsframe_index_(0),
- function_(function),
- context_(function->context()),
- failed_(false) {
+ ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
+ : isolate_(isolate),
+ frame_(NULL),
+ inlined_jsframe_index_(0),
+ function_(function),
+ context_(function->context()),
+ failed_(false) {
if (function->IsBuiltin()) {
context_ = Handle<Context>();
}
@@ -12079,8 +6630,7 @@ class ScopeIterator {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
switch (scope_info->scope_type()) {
case FUNCTION_SCOPE:
- DCHECK(context_->IsFunctionContext() ||
- !scope_info->HasContext());
+ DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
return ScopeTypeLocal;
case MODULE_SCOPE:
DCHECK(context_->IsModuleContext());
@@ -12095,8 +6645,7 @@ class ScopeIterator {
DCHECK(context_->IsCatchContext());
return ScopeTypeCatch;
case BLOCK_SCOPE:
- DCHECK(!scope_info->HasContext() ||
- context_->IsBlockContext());
+ DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
return ScopeTypeBlock;
case EVAL_SCOPE:
UNREACHABLE();
@@ -12157,15 +6706,15 @@ class ScopeIterator {
break;
case ScopeIterator::ScopeTypeLocal:
return SetLocalVariableValue(isolate_, frame_, inlined_jsframe_index_,
- variable_name, new_value);
+ variable_name, new_value);
case ScopeIterator::ScopeTypeWith:
break;
case ScopeIterator::ScopeTypeCatch:
- return SetCatchVariableValue(isolate_, CurrentContext(),
- variable_name, new_value);
+ return SetCatchVariableValue(isolate_, CurrentContext(), variable_name,
+ new_value);
case ScopeIterator::ScopeTypeClosure:
return SetClosureVariableValue(isolate_, CurrentContext(),
- variable_name, new_value);
+ variable_name, new_value);
case ScopeIterator::ScopeTypeBlock:
// TODO(2399): should we implement it?
break;
@@ -12192,8 +6741,7 @@ class ScopeIterator {
// be an actual context.
Handle<Context> CurrentContext() {
DCHECK(!failed_);
- if (Type() == ScopeTypeGlobal ||
- nested_scope_chain_.is_empty()) {
+ if (Type() == ScopeTypeGlobal || nested_scope_chain_.is_empty()) {
return context_;
} else if (nested_scope_chain_.last()->HasContext()) {
return context_;
@@ -12302,9 +6850,7 @@ RUNTIME_FUNCTION(Runtime_GetScopeCount) {
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(isolate, frame, 0);
- !it.Done();
- it.Next()) {
+ for (ScopeIterator it(isolate, frame, 0); !it.Done(); it.Next()) {
n++;
}
@@ -12330,10 +6876,8 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
JavaScriptFrame* frame = frame_it.frame();
- Handle<JSFunction> fun =
- Handle<JSFunction>(frame->function());
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(fun->shared());
+ Handle<JSFunction> fun = Handle<JSFunction>(frame->function());
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(fun->shared());
if (!isolate->debug()->EnsureDebugInfo(shared, fun)) {
return isolate->heap()->undefined_value();
@@ -12372,17 +6916,15 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
if (break_location_iterator.IsStepInLocation(isolate)) {
Smi* position_value = Smi::FromInt(break_location_iterator.position());
RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetElement(array, len,
- Handle<Object>(position_value, isolate),
- NONE, SLOPPY));
+ isolate, JSObject::SetElement(
+ array, len, Handle<Object>(position_value, isolate),
+ NONE, SLOPPY));
len++;
}
}
// Advance iterator.
break_location_iterator.Next();
- if (current_statement_pos !=
- break_location_iterator.statement_position()) {
+ if (current_statement_pos != break_location_iterator.statement_position()) {
break;
}
}
@@ -12396,8 +6938,7 @@ static const int kScopeDetailsSize = 2;
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScopeDetails(
- Isolate* isolate,
- ScopeIterator* it) {
+ Isolate* isolate, ScopeIterator* it) {
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
@@ -12405,8 +6946,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScopeDetails(
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
Handle<JSObject> scope_object;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, scope_object, it->ScopeObject(), JSObject);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, scope_object, it->ScopeObject(),
+ JSObject);
details->set(kScopeDetailsObjectIndex, *scope_object);
return isolate->factory()->NewJSArrayWithElements(details);
@@ -12447,8 +6988,8 @@ RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
return isolate->heap()->undefined_value();
}
Handle<JSObject> details;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, details, MaterializeScopeDetails(isolate, &it));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
+ MaterializeScopeDetails(isolate, &it));
return *details;
}
@@ -12486,8 +7027,8 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
ScopeIterator it(isolate, frame, inlined_jsframe_index, ignore_nested_scopes);
for (; !it.Done(); it.Next()) {
Handle<JSObject> details;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, details, MaterializeScopeDetails(isolate, &it));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
+ MaterializeScopeDetails(isolate, &it));
result.Add(details);
}
@@ -12535,8 +7076,8 @@ RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
}
Handle<JSObject> details;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, details, MaterializeScopeDetails(isolate, &it));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
+ MaterializeScopeDetails(isolate, &it));
return *details;
}
@@ -12605,9 +7146,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
// Print the scopes for the top frame.
StackFrameLocator locator(isolate);
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(isolate, frame, 0);
- !it.Done();
- it.Next()) {
+ for (ScopeIterator it(isolate, frame, 0); !it.Done(); it.Next()) {
it.DebugPrint();
}
#endif
@@ -12623,10 +7162,8 @@ RUNTIME_FUNCTION(Runtime_GetThreadCount) {
// Count all archived V8 threads.
int n = 0;
- for (ThreadState* thread =
- isolate->thread_manager()->FirstThreadStateInUse();
- thread != NULL;
- thread = thread->Next()) {
+ for (ThreadState* thread = isolate->thread_manager()->FirstThreadStateInUse();
+ thread != NULL; thread = thread->Next()) {
n++;
}
@@ -12668,8 +7205,7 @@ RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
} else {
// Find the thread with the requested index.
int n = 1;
- ThreadState* thread =
- isolate->thread_manager()->FirstThreadStateInUse();
+ ThreadState* thread = isolate->thread_manager()->FirstThreadStateInUse();
while (index != n && thread != NULL) {
thread = thread->Next();
n++;
@@ -12697,7 +7233,7 @@ RUNTIME_FUNCTION(Runtime_SetDisableBreak) {
DCHECK(args.length() == 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
isolate->debug()->set_disable_break(disable_break);
- return isolate->heap()->undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -12779,8 +7315,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
// Set break point.
if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position,
- alignment)) {
+ &source_position, alignment)) {
return isolate->heap()->undefined_value();
}
@@ -12859,10 +7394,8 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
// Get the step action and check validity.
StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
- if (step_action != StepIn &&
- step_action != StepNext &&
- step_action != StepOut &&
- step_action != StepInMin &&
+ if (step_action != StepIn && step_action != StepNext &&
+ step_action != StepOut && step_action != StepInMin &&
step_action != StepMin) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
@@ -12883,8 +7416,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
// Prepare step.
isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count,
- frame_id);
+ step_count, frame_id);
return isolate->heap()->undefined_value();
}
@@ -12901,9 +7433,7 @@ RUNTIME_FUNCTION(Runtime_ClearStepping) {
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeArgumentsObject(
- Isolate* isolate,
- Handle<JSObject> target,
- Handle<JSFunction> function) {
+ Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function) {
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
if (!function->shared()->is_function()) return target;
@@ -12913,13 +7443,12 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeArgumentsObject(
if (maybe.value) return target;
// FunctionGetArguments can't throw an exception.
- Handle<JSObject> arguments = Handle<JSObject>::cast(
- Accessors::FunctionGetArguments(function));
+ Handle<JSObject> arguments =
+ Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
Handle<String> arguments_str = isolate->factory()->arguments_string();
- RETURN_ON_EXCEPTION(
- isolate,
- Runtime::DefineObjectProperty(target, arguments_str, arguments, NONE),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate, Runtime::DefineObjectProperty(
+ target, arguments_str, arguments, NONE),
+ JSObject);
return target;
}
@@ -12938,18 +7467,14 @@ static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
Handle<JSFunction> eval_fun;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, eval_fun,
- Compiler::GetFunctionFromEval(source,
- context,
- SLOPPY,
- NO_PARSE_RESTRICTION,
- RelocInfo::kNoPosition),
+ isolate, eval_fun, Compiler::GetFunctionFromEval(source, context, SLOPPY,
+ NO_PARSE_RESTRICTION,
+ RelocInfo::kNoPosition),
Object);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, eval_fun, receiver, 0, NULL),
+ isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
Object);
// Skip the global proxy as it has no properties and always delegates to the
@@ -12966,6 +7491,16 @@ static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
}
+static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) {
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<Map> new_map = Map::Copy(Handle<Map>(result->map()));
+ new_map->set_prototype(*isolate->factory()->null_value());
+ JSObject::MigrateToMap(result, new_map);
+ return result;
+}
+
+
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
// - Parameters and stack-allocated locals need to be materialized. Altered
@@ -13003,35 +7538,69 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
- // Evaluate on the context of the frame.
- Handle<Context> context(Context::cast(frame->context()));
- DCHECK(!context.is_null());
-
// Materialize stack locals and the arguments object.
- Handle<JSObject> materialized =
- isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSObject> materialized = NewJSObjectWithNullProto(isolate);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, materialized,
- MaterializeStackLocalsWithFrameInspector(
- isolate, materialized, function, &frame_inspector));
+ MaterializeStackLocalsWithFrameInspector(isolate, materialized, function,
+ &frame_inspector));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, materialized,
MaterializeArgumentsObject(isolate, materialized, function));
- // Add the materialized object in a with-scope to shadow the stack locals.
- context = isolate->factory()->NewWithContext(function, context, materialized);
+ // At this point, the lookup chain may look like this:
+ // [inner context] -> [function stack]+[function context] -> [outer context]
+ // The function stack is not an actual context, it complements the function
+ // context. In order to have the same lookup chain when debug-evaluating,
+ // we materialize the stack and insert it into the context chain as a
+ // with-context before the function context.
+ // [inner context] -> [with context] -> [function context] -> [outer context]
+ // Ordering the with-context before the function context forces a dynamic
+ // lookup instead of a static lookup that could fail as the scope info is
+ // outdated and may expect variables to still be stack-allocated.
+ // Afterwards, we write changes to the with-context back to the stack
+ // and remove it from the context chain.
+ // This could cause lookup failures if debug-evaluate creates a closure that
+ // uses this temporary context chain.
+
+ Handle<Context> eval_context(Context::cast(frame_inspector.GetContext()));
+ DCHECK(!eval_context.is_null());
+ Handle<Context> function_context = eval_context;
+ Handle<Context> outer_context(function->context(), isolate);
+ Handle<Context> inner_context;
+ // We iterate to find the function's context. If the function has no
+ // context-allocated variables, we iterate until we hit the outer context.
+ while (!function_context->IsFunctionContext() &&
+ !function_context.is_identical_to(outer_context)) {
+ inner_context = function_context;
+ function_context = Handle<Context>(function_context->previous(), isolate);
+ }
+
+ Handle<Context> materialized_context = isolate->factory()->NewWithContext(
+ function, function_context, materialized);
+
+ if (inner_context.is_null()) {
+ // No inner context. The with-context is now inner-most.
+ eval_context = materialized_context;
+ } else {
+ inner_context->set_previous(*materialized_context);
+ }
Handle<Object> receiver(frame->receiver(), isolate);
+ MaybeHandle<Object> maybe_result =
+ DebugEvaluate(isolate, eval_context, context_extension, receiver, source);
+
+ // Remove with-context if it was inserted in between.
+ if (!inner_context.is_null()) inner_context->set_previous(*function_context);
+
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- DebugEvaluate(isolate, context, context_extension, receiver, source));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, maybe_result);
// Write back potential changes to materialized stack locals to the stack.
- UpdateStackLocalsFromMaterializedObject(
- isolate, materialized, function, frame, inlined_jsframe_index);
+ UpdateStackLocalsFromMaterializedObject(isolate, materialized, function,
+ frame, inlined_jsframe_index);
return *result;
}
@@ -13103,8 +7672,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
// Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(HeapIterator* iterator,
- JSObject* target,
+static int DebugReferencedBy(HeapIterator* iterator, JSObject* target,
Object* instance_filter, int max_references,
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
@@ -13197,9 +7765,8 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
{
HeapIterator heap_iterator(heap);
- count = DebugReferencedBy(&heap_iterator,
- *target, *instance_filter, max_references,
- NULL, 0, *arguments_function);
+ count = DebugReferencedBy(&heap_iterator, *target, *instance_filter,
+ max_references, NULL, 0, *arguments_function);
}
// Allocate an array to hold the result.
@@ -13208,9 +7775,9 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
// Fill the referencing objects.
{
HeapIterator heap_iterator(heap);
- count = DebugReferencedBy(&heap_iterator,
- *target, *instance_filter, max_references,
- *instances, count, *arguments_function);
+ count = DebugReferencedBy(&heap_iterator, *target, *instance_filter,
+ max_references, *instances, count,
+ *arguments_function);
}
// Return result as JS array.
@@ -13223,10 +7790,8 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
// Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(HeapIterator* iterator,
- JSFunction* constructor,
- int max_references,
- FixedArray* instances,
+static int DebugConstructedBy(HeapIterator* iterator, JSFunction* constructor,
+ int max_references, FixedArray* instances,
int instances_size) {
DisallowHeapAllocation no_allocation;
@@ -13275,11 +7840,8 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
{
HeapIterator heap_iterator(heap);
- count = DebugConstructedBy(&heap_iterator,
- *constructor,
- max_references,
- NULL,
- 0);
+ count = DebugConstructedBy(&heap_iterator, *constructor, max_references,
+ NULL, 0);
}
// Allocate an array to hold the result.
@@ -13288,11 +7850,8 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
// Fill the referencing objects.
{
HeapIterator heap_iterator2(heap);
- count = DebugConstructedBy(&heap_iterator2,
- *constructor,
- max_references,
- *instances,
- count);
+ count = DebugConstructedBy(&heap_iterator2, *constructor, max_references,
+ *instances, count);
}
// Return result as JS array.
@@ -13332,14 +7891,6 @@ RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
}
-RUNTIME_FUNCTION(Runtime_SystemBreak) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- base::OS::DebugBreak();
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DebugDisassembleFunction) {
HandleScope scope(isolate);
#ifdef DEBUG
@@ -13389,8 +7940,7 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
DisallowHeapAllocation no_allocation;
int counter = 0;
int buffer_size = buffer->length();
- for (HeapObject* obj = iterator->next();
- obj != NULL;
+ for (HeapObject* obj = iterator->next(); obj != NULL;
obj = iterator->next()) {
DCHECK(obj != NULL);
if (!obj->IsSharedFunctionInfo()) {
@@ -13489,7 +8039,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
Handle<Script> original_script(Script::cast(original_script_value->value()));
Handle<Object> old_script = LiveEdit::ChangeScriptSource(
- original_script, new_source, old_script_name);
+ original_script, new_source, old_script_name);
if (old_script->IsScript()) {
Handle<Script> script_handle = Handle<Script>::cast(old_script);
@@ -13566,8 +8116,8 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
RUNTIME_ASSERT(orig_wrapper->value()->IsSharedFunctionInfo());
RUNTIME_ASSERT(subst_wrapper->value()->IsSharedFunctionInfo());
- LiveEdit::ReplaceRefToNestedFunction(
- parent_wrapper, orig_wrapper, subst_wrapper);
+ LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
+ subst_wrapper);
return isolate->heap()->undefined_value();
}
@@ -13709,18 +8259,12 @@ RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
MaybeHandle<Object> maybe_result;
if (without_debugger) {
- maybe_result = Execution::Call(isolate,
- function,
- handle(function->global_proxy()),
- 0,
- NULL);
+ maybe_result = Execution::Call(isolate, function,
+ handle(function->global_proxy()), 0, NULL);
} else {
DebugScope debug_scope(isolate->debug());
- maybe_result = Execution::Call(isolate,
- function,
- handle(function->global_proxy()),
- 0,
- NULL);
+ maybe_result = Execution::Call(isolate, function,
+ handle(function->global_proxy()), 0, NULL);
}
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, maybe_result);
@@ -13728,18 +8272,6 @@ RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
}
-// Sets a v8 flag.
-RUNTIME_FUNCTION(Runtime_SetFlags) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(String, arg, 0);
- SmartArrayPointer<char> flags =
- arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
- return isolate->heap()->undefined_value();
-}
-
-
// Performs a GC.
// Presently, it only does a full GC.
RUNTIME_FUNCTION(Runtime_CollectGarbage) {
@@ -13762,730 +8294,6 @@ RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
}
-#ifdef V8_I18N_SUPPORT
-RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
-
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
-
- v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
-
- // Return value which denotes invalid language tag.
- const char* const kInvalidTag = "invalid-tag";
-
- UErrorCode error = U_ZERO_ERROR;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
-
- uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &error);
- if (U_FAILURE(error) || icu_length == 0) {
- return *factory->NewStringFromAsciiChecked(kInvalidTag);
- }
-
- char result[ULOC_FULLNAME_CAPACITY];
-
- // Force strict BCP47 rules.
- uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
-
- if (U_FAILURE(error)) {
- return *factory->NewStringFromAsciiChecked(kInvalidTag);
- }
-
- return *factory->NewStringFromAsciiChecked(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
-
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
-
- const icu::Locale* available_locales = NULL;
- int32_t count = 0;
-
- if (service->IsUtf8EqualTo(CStrVector("collator"))) {
- available_locales = icu::Collator::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
- available_locales = icu::NumberFormat::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
- available_locales = icu::DateFormat::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
- available_locales = icu::BreakIterator::getAvailableLocales(count);
- }
-
- UErrorCode error = U_ZERO_ERROR;
- char result[ULOC_FULLNAME_CAPACITY];
- Handle<JSObject> locales =
- factory->NewJSObject(isolate->object_function());
-
- for (int32_t i = 0; i < count; ++i) {
- const char* icu_name = available_locales[i].getName();
-
- error = U_ZERO_ERROR;
- // No need to force strict BCP47 rules.
- uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
- if (U_FAILURE(error)) {
- // This shouldn't happen, but lets not break the user.
- continue;
- }
-
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- locales,
- factory->NewStringFromAsciiChecked(result),
- factory->NewNumber(i),
- NONE));
- }
-
- return *locales;
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
-
- DCHECK(args.length() == 0);
-
- icu::Locale default_locale;
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- return *factory->NewStringFromAsciiChecked(result);
- }
-
- return *factory->NewStringFromStaticAscii("und");
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
-
- uint32_t length = static_cast<uint32_t>(input->length()->Number());
- // Set some limit to prevent fuzz tests from going OOM.
- // Can be bumped when callers' requirements change.
- RUNTIME_ASSERT(length < 100);
- Handle<FixedArray> output = factory->NewFixedArray(length);
- Handle<Name> maximized = factory->NewStringFromStaticAscii("maximized");
- Handle<Name> base = factory->NewStringFromStaticAscii("base");
- for (unsigned int i = 0; i < length; ++i) {
- Handle<Object> locale_id;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, locale_id, Object::GetElement(isolate, input, i));
- if (!locale_id->IsString()) {
- return isolate->Throw(*factory->illegal_argument_string());
- }
-
- v8::String::Utf8Value utf8_locale_id(
- v8::Utils::ToLocal(Handle<String>::cast(locale_id)));
-
- UErrorCode error = U_ZERO_ERROR;
-
- // Convert from BCP47 to ICU format.
- // de-DE-u-co-phonebk -> de_DE@collation=phonebook
- char icu_locale[ULOC_FULLNAME_CAPACITY];
- int icu_locale_length = 0;
- uloc_forLanguageTag(*utf8_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
- &icu_locale_length, &error);
- if (U_FAILURE(error) || icu_locale_length == 0) {
- return isolate->Throw(*factory->illegal_argument_string());
- }
-
- // Maximize the locale.
- // de_DE@collation=phonebook -> de_Latn_DE@collation=phonebook
- char icu_max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_addLikelySubtags(
- icu_locale, icu_max_locale, ULOC_FULLNAME_CAPACITY, &error);
-
- // Remove extensions from maximized locale.
- // de_Latn_DE@collation=phonebook -> de_Latn_DE
- char icu_base_max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_getBaseName(
- icu_max_locale, icu_base_max_locale, ULOC_FULLNAME_CAPACITY, &error);
-
- // Get original name without extensions.
- // de_DE@collation=phonebook -> de_DE
- char icu_base_locale[ULOC_FULLNAME_CAPACITY];
- uloc_getBaseName(
- icu_locale, icu_base_locale, ULOC_FULLNAME_CAPACITY, &error);
-
- // Convert from ICU locale format to BCP47 format.
- // de_Latn_DE -> de-Latn-DE
- char base_max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_toLanguageTag(icu_base_max_locale, base_max_locale,
- ULOC_FULLNAME_CAPACITY, FALSE, &error);
-
- // de_DE -> de-DE
- char base_locale[ULOC_FULLNAME_CAPACITY];
- uloc_toLanguageTag(
- icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error);
-
- if (U_FAILURE(error)) {
- return isolate->Throw(*factory->illegal_argument_string());
- }
-
- Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
- Handle<String> value = factory->NewStringFromAsciiChecked(base_max_locale);
- JSObject::AddProperty(result, maximized, value, NONE);
- value = factory->NewStringFromAsciiChecked(base_locale);
- JSObject::AddProperty(result, base, value, NONE);
- output->set(i, *result);
- }
-
- Handle<JSArray> result = factory->NewJSArrayWithElements(output);
- result->set_length(Smi::FromInt(length));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-
- if (!input->IsJSObject()) return isolate->heap()->false_value();
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
- Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
- Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
- return isolate->heap()->ToBoolean(!tag->IsTheHole());
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
-
- if (!input->IsJSObject()) return isolate->heap()->false_value();
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
- Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
- Handle<Object> tag(obj->GetHiddenProperty(marker), isolate);
- return isolate->heap()->ToBoolean(
- tag->IsString() && String::cast(*tag)->Equals(*expected_type));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, impl, 2);
-
- Handle<String> marker = isolate->factory()->intl_initialized_marker_string();
- JSObject::SetHiddenProperty(input, marker, type);
-
- marker = isolate->factory()->intl_impl_object_string();
- JSObject::SetHiddenProperty(input, marker, impl);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-
- if (!input->IsJSObject()) {
- Vector< Handle<Object> > arguments = HandleVector(&input, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_intl_object", arguments);
- return isolate->Throw(*type_error);
- }
-
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
- Handle<String> marker = isolate->factory()->intl_impl_object_string();
- Handle<Object> impl(obj->GetHiddenProperty(marker), isolate);
- if (impl->IsTheHole()) {
- Vector< Handle<Object> > arguments = HandleVector(&obj, 1);
- Handle<Object> type_error =
- isolate->factory()->NewTypeError("not_intl_object", arguments);
- return isolate->Throw(*type_error);
- }
- return *impl;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<ObjectTemplateInfo> date_format_template =
- I18N::GetTemplate(isolate);
-
- // Create an empty object wrapper.
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object,
- Execution::InstantiateObject(date_format_template));
-
- // Set date time formatter as internal field of the resulting JS object.
- icu::SimpleDateFormat* date_format = DateFormat::InitializeDateTimeFormat(
- isolate, locale, options, resolved);
-
- if (!date_format) return isolate->ThrowIllegalOperation();
-
- local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
-
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticAscii("dateFormat");
- Handle<String> value = factory->NewStringFromStaticAscii("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
- // Make object handle weak so we can delete the data format once GC kicks in.
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- DateFormat::DeleteDateFormat);
- return *local_object;
-}
-
-
-RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
-
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value, Execution::ToNumber(isolate, date));
-
- icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(isolate, date_format_holder);
- if (!date_format) return isolate->ThrowIllegalOperation();
-
- icu::UnicodeString result;
- date_format->format(value->Number(), result);
-
- Handle<String> result_str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_str,
- isolate->factory()->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
- return *result_str;
-}
-
-
-RUNTIME_FUNCTION(Runtime_InternalDateParse) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
-
- v8::String::Utf8Value utf8_date(v8::Utils::ToLocal(date_string));
- icu::UnicodeString u_date(icu::UnicodeString::fromUTF8(*utf8_date));
- icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(isolate, date_format_holder);
- if (!date_format) return isolate->ThrowIllegalOperation();
-
- UErrorCode status = U_ZERO_ERROR;
- UDate date = date_format->parse(u_date, status);
- if (U_FAILURE(status)) return isolate->heap()->undefined_value();
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::NewDate(isolate, static_cast<double>(date)));
- DCHECK(result->IsJSDate());
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<ObjectTemplateInfo> number_format_template =
- I18N::GetTemplate(isolate);
-
- // Create an empty object wrapper.
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object,
- Execution::InstantiateObject(number_format_template));
-
- // Set number formatter as internal field of the resulting JS object.
- icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat(
- isolate, locale, options, resolved);
-
- if (!number_format) return isolate->ThrowIllegalOperation();
-
- local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
-
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticAscii("numberFormat");
- Handle<String> value = factory->NewStringFromStaticAscii("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- NumberFormat::DeleteNumberFormat);
- return *local_object;
-}
-
-
-RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
-
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value, Execution::ToNumber(isolate, number));
-
- icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
- if (!number_format) return isolate->ThrowIllegalOperation();
-
- icu::UnicodeString result;
- number_format->format(value->Number(), result);
-
- Handle<String> result_str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_str,
- isolate->factory()->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
- return *result_str;
-}
-
-
-RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
-
- v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
- icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
- icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
- if (!number_format) return isolate->ThrowIllegalOperation();
-
- UErrorCode status = U_ZERO_ERROR;
- icu::Formattable result;
- // ICU 4.6 doesn't support parseCurrency call. We need to wait for ICU49
- // to be part of Chrome.
- // TODO(cira): Include currency parsing code using parseCurrency call.
- // We need to check if the formatter parses all currencies or only the
- // one it was constructed with (it will impact the API - how to return ISO
- // code and the value).
- number_format->parse(u_number, result, status);
- if (U_FAILURE(status)) return isolate->heap()->undefined_value();
-
- switch (result.getType()) {
- case icu::Formattable::kDouble:
- return *isolate->factory()->NewNumber(result.getDouble());
- case icu::Formattable::kLong:
- return *isolate->factory()->NewNumberFromInt(result.getLong());
- case icu::Formattable::kInt64:
- return *isolate->factory()->NewNumber(
- static_cast<double>(result.getInt64()));
- default:
- return isolate->heap()->undefined_value();
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_CreateCollator) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
-
- // Create an empty object wrapper.
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object, Execution::InstantiateObject(collator_template));
-
- // Set collator as internal field of the resulting JS object.
- icu::Collator* collator = Collator::InitializeCollator(
- isolate, locale, options, resolved);
-
- if (!collator) return isolate->ThrowIllegalOperation();
-
- local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
-
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticAscii("collator");
- Handle<String> value = factory->NewStringFromStaticAscii("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- Collator::DeleteCollator);
- return *local_object;
-}
-
-
-RUNTIME_FUNCTION(Runtime_InternalCompare) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
-
- icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
- if (!collator) return isolate->ThrowIllegalOperation();
-
- v8::String::Value string_value1(v8::Utils::ToLocal(string1));
- v8::String::Value string_value2(v8::Utils::ToLocal(string2));
- const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
- const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
- UErrorCode status = U_ZERO_ERROR;
- UCollationResult result = collator->compare(u_string1,
- string_value1.length(),
- u_string2,
- string_value2.length(),
- status);
- if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
-
- return *isolate->factory()->NewNumberFromInt(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringNormalize) {
- HandleScope scope(isolate);
- static const UNormalizationMode normalizationForms[] =
- { UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD };
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
- CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
- RUNTIME_ASSERT(form_id >= 0 &&
- static_cast<size_t>(form_id) < ARRAY_SIZE(normalizationForms));
-
- v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
- const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
-
- // TODO(mnita): check Normalizer2 (not available in ICU 46)
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString result;
- icu::Normalizer::normalize(u_value, normalizationForms[form_id], 0,
- result, status);
- if (U_FAILURE(status)) {
- return isolate->heap()->undefined_value();
- }
-
- Handle<String> result_str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_str,
- isolate->factory()->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
- return *result_str;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<ObjectTemplateInfo> break_iterator_template =
- I18N::GetTemplate2(isolate);
-
- // Create an empty object wrapper.
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object,
- Execution::InstantiateObject(break_iterator_template));
-
- // Set break iterator as internal field of the resulting JS object.
- icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
- isolate, locale, options, resolved);
-
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- local_object->SetInternalField(0, reinterpret_cast<Smi*>(break_iterator));
- // Make sure that the pointer to adopted text is NULL.
- local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
-
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticAscii("breakIterator");
- Handle<String> value = factory->NewStringFromStaticAscii("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
- // Make object handle weak so we can delete the break iterator once GC kicks
- // in.
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- BreakIterator::DeleteBreakIterator);
- return *local_object;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
-
- icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
- break_iterator_holder->GetInternalField(1));
- delete u_text;
-
- v8::String::Value text_value(v8::Utils::ToLocal(text));
- u_text = new icu::UnicodeString(
- reinterpret_cast<const UChar*>(*text_value), text_value.length());
- break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
-
- break_iterator->setText(*u_text);
-
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->first());
-}
-
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->next());
-}
-
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->current());
-}
-
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
- icu::RuleBasedBreakIterator* rule_based_iterator =
- static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
- int32_t status = rule_based_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- return *isolate->factory()->NewStringFromStaticAscii("none");
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return *isolate->factory()->NewStringFromStaticAscii("number");
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- return *isolate->factory()->NewStringFromStaticAscii("letter");
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- return *isolate->factory()->NewStringFromStaticAscii("kana");
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- return *isolate->factory()->NewStringFromStaticAscii("ideo");
- } else {
- return *isolate->factory()->NewStringFromStaticAscii("unknown");
- }
-}
-#endif // V8_I18N_SUPPORT
-
-
// Finds the script object from the script data. NOTE: This operation uses
// heap traversal to find the function generated for the source position
// for the requested break point. For lazily compiled functions several heap
@@ -14567,44 +8375,61 @@ RUNTIME_FUNCTION(Runtime_GetV8Version) {
}
-RUNTIME_FUNCTION(Runtime_Abort) {
- SealHandleScope shs(isolate);
+// Returns function of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
+ HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
- const char* message = GetBailoutReason(
- static_cast<BailoutReason>(message_id));
- base::OS::PrintError("abort: %s\n", message);
- isolate->PrintStack(stderr);
- base::OS::Abort();
- UNREACHABLE();
- return NULL;
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return generator->function();
}
-RUNTIME_FUNCTION(Runtime_AbortJS) {
+// Returns context of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
- base::OS::PrintError("abort: %s\n", message->ToCString().get());
- isolate->PrintStack(stderr);
- base::OS::Abort();
- UNREACHABLE();
- return NULL;
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return generator->context();
}
-RUNTIME_FUNCTION(Runtime_FlattenString) {
+// Returns receiver of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
- return *String::Flatten(str);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return generator->receiver();
}
-RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
+// Returns generator continuation as a PC offset, or the magic -1 or 0 values.
+RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- isolate->heap()->NotifyContextDisposed();
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return Smi::FromInt(generator->continuation());
+}
+
+
+RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ if (generator->is_suspended()) {
+ Handle<Code> code(generator->function()->code(), isolate);
+ int offset = generator->continuation();
+
+ RUNTIME_ASSERT(0 <= offset && offset < code->Size());
+ Address pc = code->address() + offset;
+
+ return Smi::FromInt(code->SourcePosition(pc));
+ }
+
return isolate->heap()->undefined_value();
}
@@ -14662,8 +8487,7 @@ RUNTIME_FUNCTION(Runtime_GetFromCache) {
return cache->get(finger_index + 1);
}
- for (int i = finger_index - 2;
- i >= JSFunctionResultCache::kEntriesIndex;
+ for (int i = finger_index - 2; i >= JSFunctionResultCache::kEntriesIndex;
i -= 2) {
o = cache->get(i);
if (o == key) {
@@ -14692,14 +8516,14 @@ RUNTIME_FUNCTION(Runtime_GetFromCache) {
Handle<Object> value;
{
Handle<JSFunction> factory(JSFunction::cast(
- cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
+ cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
Handle<JSObject> receiver(isolate->global_proxy());
// This handle is nor shared, nor used later, so it's safe.
- Handle<Object> argv[] = { key_handle };
+ Handle<Object> argv[] = {key_handle};
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, value,
- Execution::Call(isolate, factory, receiver, ARRAY_SIZE(argv), argv));
+ Execution::Call(isolate, factory, receiver, arraysize(argv), argv));
}
#ifdef VERIFY_HEAP
@@ -14765,31 +8589,30 @@ RUNTIME_FUNCTION(Runtime_MessageGetScript) {
RUNTIME_FUNCTION(Runtime_ListNatives) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
-#define COUNT_ENTRY(Name, argc, ressize) + 1
- int entry_count = 0
- RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_FUNCTION_LIST(COUNT_ENTRY)
+#define COUNT_ENTRY(Name, argc, ressize) +1
+ int entry_count =
+ 0 RUNTIME_FUNCTION_LIST(COUNT_ENTRY) INLINE_FUNCTION_LIST(COUNT_ENTRY)
INLINE_OPTIMIZED_FUNCTION_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY
Factory* factory = isolate->factory();
Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
int index = 0;
bool inline_runtime_functions = false;
-#define ADD_ENTRY(Name, argc, ressize) \
- { \
- HandleScope inner(isolate); \
- Handle<String> name; \
- /* Inline runtime functions have an underscore in front of the name. */ \
- if (inline_runtime_functions) { \
- name = factory->NewStringFromStaticAscii("_" #Name); \
- } else { \
- name = factory->NewStringFromStaticAscii(#Name); \
- } \
- Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
- pair_elements->set(0, *name); \
- pair_elements->set(1, Smi::FromInt(argc)); \
- Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements); \
- elements->set(index++, *pair); \
+#define ADD_ENTRY(Name, argc, ressize) \
+ { \
+ HandleScope inner(isolate); \
+ Handle<String> name; \
+ /* Inline runtime functions have an underscore in front of the name. */ \
+ if (inline_runtime_functions) { \
+ name = factory->NewStringFromStaticChars("_" #Name); \
+ } else { \
+ name = factory->NewStringFromStaticChars(#Name); \
+ } \
+ Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
+ pair_elements->set(0, *name); \
+ pair_elements->set(1, Smi::FromInt(argc)); \
+ Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements); \
+ elements->set(index++, *pair); \
}
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
@@ -14810,30 +8633,10 @@ RUNTIME_FUNCTION(Runtime_IS_VAR) {
}
-#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
- RUNTIME_FUNCTION(Runtime_Has##Name) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->Has##Name()); \
- }
-
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
-// Properties test sitting with elements tests - not fooling anyone.
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
-
-#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-
-
-#define TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, size) \
- RUNTIME_FUNCTION(Runtime_HasExternal##Type##Elements) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->HasExternal##Type##Elements()); \
+#define TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, size) \
+ RUNTIME_FUNCTION(Runtime_HasExternal##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasExternal##Type##Elements()); \
}
TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
@@ -14841,10 +8644,10 @@ TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
- RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
+#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
+ RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
}
TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
@@ -14852,15 +8655,6 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-RUNTIME_FUNCTION(Runtime_HaveSameMap) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, obj1, 0);
- CONVERT_ARG_CHECKED(JSObject, obj2, 1);
- return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
-}
-
-
RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -14918,20 +8712,6 @@ RUNTIME_FUNCTION(Runtime_GetObservationState) {
}
-RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- // TODO(adamk): Currently this runtime function is only called three times per
- // isolate. If it's called more often, the map should be moved into the
- // strong root list.
- Handle<Map> map =
- isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
- Handle<JSWeakMap> weakmap =
- Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
- return *WeakCollectionInitialize(isolate, weakmap);
-}
-
-
static bool ContextsHaveSameOrigin(Handle<Context> context1,
Handle<Context> context2) {
return context1->security_token() == context2->security_token();
@@ -14997,9 +8777,9 @@ RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) {
static Object* ArrayConstructorCommon(Isolate* isolate,
- Handle<JSFunction> constructor,
- Handle<AllocationSite> site,
- Arguments* caller_args) {
+ Handle<JSFunction> constructor,
+ Handle<AllocationSite> site,
+ Arguments* caller_args) {
Factory* factory = isolate->factory();
bool holey = false;
@@ -15063,8 +8843,7 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
RETURN_FAILURE_ON_EXCEPTION(
isolate, ArrayConstructInitializeElements(array, caller_args));
if (!site.is_null() &&
- (old_kind != array->GetElementsKind() ||
- !can_use_type_feedback)) {
+ (old_kind != array->GetElementsKind() || !can_use_type_feedback)) {
// The arguments passed in caused a transition. This kind of complexity
// can't be dealt with in the inlined hydrogen array constructor case.
// We must mark the allocationsite as un-inlinable.
@@ -15085,9 +8864,8 @@ RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
bool no_caller_args = args.length() == 2;
DCHECK(no_caller_args || args.length() == 4);
int parameters_start = no_caller_args ? 0 : 1;
- Arguments* caller_args = no_caller_args
- ? &empty_args
- : reinterpret_cast<Arguments*>(args[0]);
+ Arguments* caller_args =
+ no_caller_args ? &empty_args : reinterpret_cast<Arguments*>(args[0]);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
#ifdef DEBUG
@@ -15104,10 +8882,7 @@ RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
DCHECK(!site->SitePointsToLiteral());
}
- return ArrayConstructorCommon(isolate,
- constructor,
- site,
- caller_args);
+ return ArrayConstructorCommon(isolate, constructor, site, caller_args);
}
@@ -15117,9 +8892,8 @@ RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
bool no_caller_args = args.length() == 1;
DCHECK(no_caller_args || args.length() == 3);
int parameters_start = no_caller_args ? 0 : 1;
- Arguments* caller_args = no_caller_args
- ? &empty_args
- : reinterpret_cast<Arguments*>(args[0]);
+ Arguments* caller_args =
+ no_caller_args ? &empty_args : reinterpret_cast<Arguments*>(args[0]);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
#ifdef DEBUG
if (!no_caller_args) {
@@ -15127,10 +8901,8 @@ RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
DCHECK(arg_count == caller_args->length());
}
#endif
- return ArrayConstructorCommon(isolate,
- constructor,
- Handle<AllocationSite>::null(),
- caller_args);
+ return ArrayConstructorCommon(isolate, constructor,
+ Handle<AllocationSite>::null(), caller_args);
}
@@ -15211,17 +8983,17 @@ RUNTIME_FUNCTION(Runtime_ForInCacheArrayLength) {
RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) {
SealHandleScope scope(isolate);
DCHECK(args.length() == 4);
+ int32_t index;
// This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
// Not worth creating a macro atm as this function should be removed.
if (!args[0]->IsJSReceiver() || !args[1]->IsFixedArray() ||
- !args[2]->IsObject() || !args[3]->IsSmi()) {
+ !args[2]->IsObject() || !args[3]->ToInt32(&index)) {
Object* error = isolate->ThrowIllegalOperation();
return MakePair(error, isolate->heap()->undefined_value());
}
Handle<JSReceiver> object = args.at<JSReceiver>(0);
Handle<FixedArray> array = args.at<FixedArray>(1);
Handle<Object> cache_type = args.at<Object>(2);
- int index = args.smi_at(3);
// Figure out first if a slow check is needed for this object.
bool slow_check_needed = false;
if (cache_type->IsMap()) {
@@ -15252,8 +9024,6 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) {
}
U(IsStringWrapperSafeForDefaultValueOf)
-U(GeneratorNext)
-U(GeneratorThrow)
U(DebugBreakInOptimizedCode)
#undef U
@@ -15349,8 +9119,9 @@ RUNTIME_FUNCTION(RuntimeReference_DateField) {
CONVERT_SMI_ARG_CHECKED(index, 1);
if (!obj->IsJSDate()) {
HandleScope scope(isolate);
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "not_date_object", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("not_date_object", HandleVector<Object>(NULL, 0)));
}
JSDate* date = JSDate::cast(obj);
if (index == 0) return date->value();
@@ -15358,46 +9129,6 @@ RUNTIME_FUNCTION(RuntimeReference_DateField) {
}
-RUNTIME_FUNCTION(RuntimeReference_StringCharFromCode) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_CharFromCode(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_StringCharAt) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- if (!args[0]->IsString()) return Smi::FromInt(0);
- if (!args[1]->IsNumber()) return Smi::FromInt(0);
- if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
- Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
- if (code->IsNaN()) return isolate->heap()->empty_string();
- return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_CHECKED(SeqOneByteString, string, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_SMI_ARG_CHECKED(value, 2);
- string->SeqOneByteStringSet(index, value);
- return string;
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_CHECKED(SeqTwoByteString, string, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_SMI_ARG_CHECKED(value, 2);
- string->SeqTwoByteStringSet(index, value);
- return string;
-}
-
-
RUNTIME_FUNCTION(RuntimeReference_ObjectEquals) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
@@ -15446,22 +9177,6 @@ RUNTIME_FUNCTION(RuntimeReference_IsSpecObject) {
}
-RUNTIME_FUNCTION(RuntimeReference_MathPow) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_MathPowSlow(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_IsMinusZero) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsHeapNumber()) return isolate->heap()->false_value();
- HeapNumber* number = HeapNumber::cast(obj);
- return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
-}
-
-
RUNTIME_FUNCTION(RuntimeReference_HasCachedArrayIndex) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -15476,59 +9191,31 @@ RUNTIME_FUNCTION(RuntimeReference_GetCachedArrayIndex) {
}
-RUNTIME_FUNCTION(RuntimeReference_FastAsciiArrayJoin) {
+RUNTIME_FUNCTION(RuntimeReference_FastOneByteArrayJoin) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(RuntimeReference_ClassOf) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
- return JSReceiver::cast(obj)->class_name();
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- if (!args[0]->IsString()) return isolate->heap()->undefined_value();
- if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
- if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
- return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_StringAdd) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_StringAdd(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_SubString) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_SubString(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_StringCompare) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_StringCompare(args, isolate);
+RUNTIME_FUNCTION(RuntimeReference_GeneratorNext) {
+ UNREACHABLE(); // Optimization disabled in SetUpGenerators().
+ return NULL;
}
-RUNTIME_FUNCTION(RuntimeReference_RegExpExec) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_RegExpExecRT(args, isolate);
+RUNTIME_FUNCTION(RuntimeReference_GeneratorThrow) {
+ UNREACHABLE(); // Optimization disabled in SetUpGenerators().
+ return NULL;
}
-RUNTIME_FUNCTION(RuntimeReference_RegExpConstructResult) {
+RUNTIME_FUNCTION(RuntimeReference_ClassOf) {
SealHandleScope shs(isolate);
- return __RT_impl_Runtime_RegExpConstructResult(args, isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
+ return JSReceiver::cast(obj)->class_name();
}
@@ -15541,12 +9228,6 @@ RUNTIME_FUNCTION(RuntimeReference_GetFromCache) {
}
-RUNTIME_FUNCTION(RuntimeReference_NumberToString) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_NumberToStringRT(args, isolate);
-}
-
-
RUNTIME_FUNCTION(RuntimeReference_DebugIsActive) {
SealHandleScope shs(isolate);
return Smi::FromInt(isolate->debug()->is_active());
@@ -15581,11 +9262,8 @@ RUNTIME_FUNCTION(RuntimeReference_DebugIsActive) {
static const Runtime::Function kIntrinsicFunctions[] = {
- RUNTIME_FUNCTION_LIST(F)
- INLINE_OPTIMIZED_FUNCTION_LIST(F)
- INLINE_FUNCTION_LIST(I)
- INLINE_OPTIMIZED_FUNCTION_LIST(IO)
-};
+ RUNTIME_FUNCTION_LIST(F) INLINE_OPTIMIZED_FUNCTION_LIST(F)
+ INLINE_FUNCTION_LIST(I) INLINE_OPTIMIZED_FUNCTION_LIST(IO)};
#undef IO
#undef I
@@ -15600,8 +9278,7 @@ void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
const char* name = kIntrinsicFunctions[i].name;
if (name == NULL) continue;
Handle<NameDictionary> new_dict = NameDictionary::Add(
- dict,
- isolate->factory()->InternalizeUtf8String(name),
+ dict, isolate->factory()->InternalizeUtf8String(name),
Handle<Smi>(Smi::FromInt(i), isolate),
PropertyDetails(NONE, NORMAL, Representation::None()));
// The dictionary does not need to grow.
@@ -15623,7 +9300,7 @@ const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
const Runtime::Function* Runtime::FunctionForEntry(Address entry) {
- for (size_t i = 0; i < ARRAY_SIZE(kIntrinsicFunctions); ++i) {
+ for (size_t i = 0; i < arraysize(kIntrinsicFunctions); ++i) {
if (entry == kIntrinsicFunctions[i].entry) {
return &(kIntrinsicFunctions[i]);
}
@@ -15635,5 +9312,5 @@ const Runtime::Function* Runtime::FunctionForEntry(Address entry) {
const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
return &(kIntrinsicFunctions[static_cast<int>(id)]);
}
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
new file mode 100644
index 0000000000..da8511b1a4
--- /dev/null
+++ b/deps/v8/src/runtime/runtime.h
@@ -0,0 +1,907 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_RUNTIME_H_
+#define V8_RUNTIME_H_
+
+#include "src/allocation.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// The interface to C++ runtime functions.
+
+// ----------------------------------------------------------------------------
+// RUNTIME_FUNCTION_LIST_ALWAYS defines runtime calls available in both
+// release and debug mode.
+// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
+
+// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
+// MSVC Intellisense to crash. It was broken into two macros to work around
+// this problem. Please avoid large recursive macros whenever possible.
+#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+ /* Property access */ \
+ F(GetProperty, 2, 1) \
+ F(KeyedGetProperty, 2, 1) \
+ F(DeleteProperty, 3, 1) \
+ F(HasOwnProperty, 2, 1) \
+ F(HasProperty, 2, 1) \
+ F(HasElement, 2, 1) \
+ F(IsPropertyEnumerable, 2, 1) \
+ F(GetPropertyNames, 1, 1) \
+ F(GetPropertyNamesFast, 1, 1) \
+ F(GetOwnPropertyNames, 2, 1) \
+ F(GetOwnElementNames, 1, 1) \
+ F(GetInterceptorInfo, 1, 1) \
+ F(GetNamedInterceptorPropertyNames, 1, 1) \
+ F(GetIndexedInterceptorElementNames, 1, 1) \
+ F(GetArgumentsProperty, 1, 1) \
+ F(ToFastProperties, 1, 1) \
+ F(FinishArrayPrototypeSetup, 1, 1) \
+ F(SpecialArrayFunctions, 0, 1) \
+ F(IsSloppyModeFunction, 1, 1) \
+ F(GetDefaultReceiver, 1, 1) \
+ \
+ F(GetPrototype, 1, 1) \
+ F(SetPrototype, 2, 1) \
+ F(InternalSetPrototype, 2, 1) \
+ F(IsInPrototypeChain, 2, 1) \
+ \
+ F(GetOwnProperty, 2, 1) \
+ \
+ F(IsExtensible, 1, 1) \
+ F(PreventExtensions, 1, 1) \
+ \
+ /* Utilities */ \
+ F(CheckIsBootstrapping, 0, 1) \
+ F(GetRootNaN, 0, 1) \
+ F(Call, -1 /* >= 2 */, 1) \
+ F(Apply, 5, 1) \
+ F(GetFunctionDelegate, 1, 1) \
+ F(GetConstructorDelegate, 1, 1) \
+ F(DeoptimizeFunction, 1, 1) \
+ F(ClearFunctionTypeFeedback, 1, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
+ F(GetOptimizationCount, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
+ F(CompileForOnStackReplacement, 1, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
+ F(SetNativeFlag, 1, 1) \
+ F(SetInlineBuiltinFlag, 1, 1) \
+ F(StoreArrayLiteralElement, 5, 1) \
+ F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(DebugPushPromise, 1, 1) \
+ F(DebugPopPromise, 0, 1) \
+ F(DebugPromiseEvent, 1, 1) \
+ F(DebugPromiseRejectEvent, 2, 1) \
+ F(DebugAsyncTaskEvent, 1, 1) \
+ F(FlattenString, 1, 1) \
+ F(LoadMutableDouble, 2, 1) \
+ F(TryMigrateInstance, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ \
+ /* Array join support */ \
+ F(PushIfAbsent, 2, 1) \
+ F(ArrayConcat, 1, 1) \
+ \
+ /* Conversions */ \
+ F(ToBool, 1, 1) \
+ F(Typeof, 1, 1) \
+ \
+ F(Booleanize, 2, 1) /* TODO(turbofan): Only temporary */ \
+ \
+ F(StringToNumber, 1, 1) \
+ F(StringParseInt, 2, 1) \
+ F(StringParseFloat, 1, 1) \
+ F(StringToLowerCase, 1, 1) \
+ F(StringToUpperCase, 1, 1) \
+ F(StringSplit, 3, 1) \
+ F(CharFromCode, 1, 1) \
+ F(URIEscape, 1, 1) \
+ F(URIUnescape, 1, 1) \
+ \
+ F(NumberToInteger, 1, 1) \
+ F(NumberToIntegerMapMinusZero, 1, 1) \
+ F(NumberToJSUint32, 1, 1) \
+ F(NumberToJSInt32, 1, 1) \
+ \
+ /* Arithmetic operations */ \
+ F(NumberAdd, 2, 1) \
+ F(NumberSub, 2, 1) \
+ F(NumberMul, 2, 1) \
+ F(NumberDiv, 2, 1) \
+ F(NumberMod, 2, 1) \
+ F(NumberUnaryMinus, 1, 1) \
+ F(NumberImul, 2, 1) \
+ \
+ F(StringBuilderConcat, 3, 1) \
+ F(StringBuilderJoin, 3, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
+ \
+ /* Bit operations */ \
+ F(NumberOr, 2, 1) \
+ F(NumberAnd, 2, 1) \
+ F(NumberXor, 2, 1) \
+ \
+ F(NumberShl, 2, 1) \
+ F(NumberShr, 2, 1) \
+ F(NumberSar, 2, 1) \
+ \
+ /* Comparisons */ \
+ F(NumberEquals, 2, 1) \
+ F(StringEquals, 2, 1) \
+ \
+ F(NumberCompare, 3, 1) \
+ F(SmiLexicographicCompare, 2, 1) \
+ \
+ /* Math */ \
+ F(MathAcos, 1, 1) \
+ F(MathAsin, 1, 1) \
+ F(MathAtan, 1, 1) \
+ F(MathFloorRT, 1, 1) \
+ F(MathAtan2, 2, 1) \
+ F(MathExpRT, 1, 1) \
+ F(RoundNumber, 1, 1) \
+ F(MathFround, 1, 1) \
+ F(RemPiO2, 1, 1) \
+ \
+ /* Regular expressions */ \
+ F(RegExpCompile, 3, 1) \
+ F(RegExpExecMultiple, 4, 1) \
+ F(RegExpInitializeObject, 6, 1) \
+ \
+ /* JSON */ \
+ F(ParseJson, 1, 1) \
+ F(BasicJSONStringify, 1, 1) \
+ F(QuoteJSONString, 1, 1) \
+ \
+ /* Strings */ \
+ F(StringIndexOf, 3, 1) \
+ F(StringLastIndexOf, 3, 1) \
+ F(StringLocaleCompare, 2, 1) \
+ F(StringReplaceGlobalRegExpWithString, 4, 1) \
+ F(StringReplaceOneCharWithString, 3, 1) \
+ F(StringMatch, 3, 1) \
+ F(StringTrim, 3, 1) \
+ F(StringToArray, 2, 1) \
+ F(NewStringWrapper, 1, 1) \
+ F(NewString, 2, 1) \
+ F(TruncateString, 2, 1) \
+ \
+ /* Numbers */ \
+ F(NumberToRadixString, 2, 1) \
+ F(NumberToFixed, 2, 1) \
+ F(NumberToExponential, 2, 1) \
+ F(NumberToPrecision, 2, 1) \
+ F(IsValidSmi, 1, 1) \
+ \
+ /* Classes support */ \
+ F(ToMethod, 2, 1) \
+ F(HomeObjectSymbol, 0, 1) \
+ F(ThrowNonMethodError, 0, 1) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(LoadFromSuper, 3, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1)
+
+
+#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
+ /* Reflection */ \
+ F(FunctionSetInstanceClassName, 2, 1) \
+ F(FunctionSetLength, 2, 1) \
+ F(FunctionSetPrototype, 2, 1) \
+ F(FunctionGetName, 1, 1) \
+ F(FunctionSetName, 2, 1) \
+ F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionIsGenerator, 1, 1) \
+ F(FunctionIsArrow, 1, 1) \
+ F(FunctionIsConciseMethod, 1, 1) \
+ F(FunctionBindArguments, 4, 1) \
+ F(BoundFunctionGetBindings, 1, 1) \
+ F(FunctionRemovePrototype, 1, 1) \
+ F(FunctionGetSourceCode, 1, 1) \
+ F(FunctionGetScript, 1, 1) \
+ F(FunctionGetScriptSourcePosition, 1, 1) \
+ F(FunctionGetPositionForOffset, 2, 1) \
+ F(FunctionIsAPIFunction, 1, 1) \
+ F(FunctionIsBuiltin, 1, 1) \
+ F(GetScript, 1, 1) \
+ F(CollectStackTrace, 2, 1) \
+ F(GetV8Version, 0, 1) \
+ F(GeneratorGetFunction, 1, 1) \
+ F(GeneratorGetContext, 1, 1) \
+ F(GeneratorGetReceiver, 1, 1) \
+ F(GeneratorGetContinuation, 1, 1) \
+ F(GeneratorGetSourcePosition, 1, 1) \
+ \
+ F(SetCode, 2, 1) \
+ \
+ F(CreateApiFunction, 2, 1) \
+ F(IsTemplate, 1, 1) \
+ F(GetTemplateField, 2, 1) \
+ F(DisableAccessChecks, 1, 1) \
+ F(EnableAccessChecks, 1, 1) \
+ \
+ /* Dates */ \
+ F(DateCurrentTime, 0, 1) \
+ F(DateParseString, 2, 1) \
+ F(DateLocalTimezone, 1, 1) \
+ F(DateToUTC, 1, 1) \
+ F(DateMakeDay, 2, 1) \
+ F(DateSetValue, 3, 1) \
+ F(DateCacheVersion, 0, 1) \
+ \
+ /* Globals */ \
+ F(CompileString, 2, 1) \
+ \
+ /* Eval */ \
+ F(GlobalProxy, 1, 1) \
+ F(IsAttachedGlobal, 1, 1) \
+ \
+ F(AddNamedProperty, 4, 1) \
+ F(AddPropertyForTemplate, 4, 1) \
+ F(SetProperty, 4, 1) \
+ F(AddElement, 4, 1) \
+ F(DefineApiAccessorProperty, 5, 1) \
+ F(DefineDataPropertyUnchecked, 4, 1) \
+ F(DefineAccessorPropertyUnchecked, 5, 1) \
+ F(GetDataProperty, 2, 1) \
+ F(SetHiddenProperty, 3, 1) \
+ \
+ /* Arrays */ \
+ F(RemoveArrayHoles, 2, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(NormalizeElements, 1, 1) \
+ \
+ /* Getters and Setters */ \
+ F(LookupAccessor, 3, 1) \
+ \
+ /* ES5 */ \
+ F(ObjectFreeze, 1, 1) \
+ \
+ /* Harmony modules */ \
+ F(IsJSModule, 1, 1) \
+ \
+ /* Harmony symbols */ \
+ F(CreateSymbol, 1, 1) \
+ F(CreatePrivateSymbol, 1, 1) \
+ F(CreateGlobalPrivateOwnSymbol, 1, 1) \
+ F(CreatePrivateOwnSymbol, 1, 1) \
+ F(NewSymbolWrapper, 1, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolRegistry, 0, 1) \
+ F(SymbolIsPrivate, 1, 1) \
+ \
+ /* Harmony proxies */ \
+ F(CreateJSProxy, 2, 1) \
+ F(CreateJSFunctionProxy, 4, 1) \
+ F(IsJSProxy, 1, 1) \
+ F(IsJSFunctionProxy, 1, 1) \
+ F(GetHandler, 1, 1) \
+ F(GetCallTrap, 1, 1) \
+ F(GetConstructTrap, 1, 1) \
+ F(Fix, 1, 1) \
+ \
+ /* Harmony sets */ \
+ F(SetInitialize, 1, 1) \
+ F(SetAdd, 2, 1) \
+ F(SetHas, 2, 1) \
+ F(SetDelete, 2, 1) \
+ F(SetClear, 1, 1) \
+ F(SetGetSize, 1, 1) \
+ \
+ F(SetIteratorInitialize, 3, 1) \
+ F(SetIteratorNext, 2, 1) \
+ \
+ /* Harmony maps */ \
+ F(MapInitialize, 1, 1) \
+ F(MapGet, 2, 1) \
+ F(MapHas, 2, 1) \
+ F(MapDelete, 2, 1) \
+ F(MapClear, 1, 1) \
+ F(MapSet, 3, 1) \
+ F(MapGetSize, 1, 1) \
+ \
+ F(MapIteratorInitialize, 3, 1) \
+ F(MapIteratorNext, 2, 1) \
+ \
+ /* Harmony weak maps and sets */ \
+ F(WeakCollectionInitialize, 1, 1) \
+ F(WeakCollectionGet, 2, 1) \
+ F(WeakCollectionHas, 2, 1) \
+ F(WeakCollectionDelete, 2, 1) \
+ F(WeakCollectionSet, 3, 1) \
+ \
+ F(GetWeakMapEntries, 1, 1) \
+ F(GetWeakSetValues, 1, 1) \
+ \
+ /* Harmony events */ \
+ F(EnqueueMicrotask, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
+ \
+ /* Harmony observe */ \
+ F(IsObserved, 1, 1) \
+ F(SetIsObserved, 1, 1) \
+ F(GetObservationState, 0, 1) \
+ F(ObservationWeakMapCreate, 0, 1) \
+ F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \
+ F(ObjectWasCreatedInCurrentOrigin, 1, 1) \
+ F(GetObjectContextObjectObserve, 1, 1) \
+ F(GetObjectContextObjectGetNotifier, 1, 1) \
+ F(GetObjectContextNotifierPerformChange, 1, 1) \
+ \
+ /* Harmony typed arrays */ \
+ F(ArrayBufferInitialize, 2, 1) \
+ F(ArrayBufferSliceImpl, 3, 1) \
+ F(ArrayBufferIsView, 1, 1) \
+ F(ArrayBufferNeuter, 1, 1) \
+ \
+ F(TypedArrayInitializeFromArrayLike, 4, 1) \
+ F(TypedArrayGetBuffer, 1, 1) \
+ F(TypedArraySetFastCases, 3, 1) \
+ \
+ F(DataViewGetBuffer, 1, 1) \
+ F(DataViewGetInt8, 3, 1) \
+ F(DataViewGetUint8, 3, 1) \
+ F(DataViewGetInt16, 3, 1) \
+ F(DataViewGetUint16, 3, 1) \
+ F(DataViewGetInt32, 3, 1) \
+ F(DataViewGetUint32, 3, 1) \
+ F(DataViewGetFloat32, 3, 1) \
+ F(DataViewGetFloat64, 3, 1) \
+ \
+ F(DataViewSetInt8, 4, 1) \
+ F(DataViewSetUint8, 4, 1) \
+ F(DataViewSetInt16, 4, 1) \
+ F(DataViewSetUint16, 4, 1) \
+ F(DataViewSetInt32, 4, 1) \
+ F(DataViewSetUint32, 4, 1) \
+ F(DataViewSetFloat32, 4, 1) \
+ F(DataViewSetFloat64, 4, 1) \
+ \
+ /* Statements */ \
+ F(NewObjectFromBound, 1, 1) \
+ \
+ /* Declarations and initialization */ \
+ F(InitializeVarGlobal, 3, 1) \
+ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ \
+ /* Debugging */ \
+ F(DebugPrint, 1, 1) \
+ F(GlobalPrint, 1, 1) \
+ F(DebugTrace, 0, 1) \
+ F(TraceEnter, 0, 1) \
+ F(TraceExit, 1, 1) \
+ F(Abort, 1, 1) \
+ F(AbortJS, 1, 1) \
+ /* ES5 */ \
+ F(OwnKeys, 1, 1) \
+ \
+ /* Message objects */ \
+ F(MessageGetStartPosition, 1, 1) \
+ F(MessageGetScript, 1, 1) \
+ \
+ /* Pseudo functions - handled as macros by parser */ \
+ F(IS_VAR, 1, 1) \
+ \
+ /* expose boolean functions from objects-inl.h */ \
+ F(HasFastSmiElements, 1, 1) \
+ F(HasFastSmiOrObjectElements, 1, 1) \
+ F(HasFastObjectElements, 1, 1) \
+ F(HasFastDoubleElements, 1, 1) \
+ F(HasFastHoleyElements, 1, 1) \
+ F(HasDictionaryElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
+ F(HasExternalUint8ClampedElements, 1, 1) \
+ F(HasExternalArrayElements, 1, 1) \
+ F(HasExternalInt8Elements, 1, 1) \
+ F(HasExternalUint8Elements, 1, 1) \
+ F(HasExternalInt16Elements, 1, 1) \
+ F(HasExternalUint16Elements, 1, 1) \
+ F(HasExternalInt32Elements, 1, 1) \
+ F(HasExternalUint32Elements, 1, 1) \
+ F(HasExternalFloat32Elements, 1, 1) \
+ F(HasExternalFloat64Elements, 1, 1) \
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(HasFixedInt8Elements, 1, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedUint16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
+ F(HasFixedUint32Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
+ F(HasFastProperties, 1, 1) \
+ F(TransitionElementsKind, 2, 1) \
+ F(HaveSameMap, 2, 1) \
+ F(IsJSGlobalProxy, 1, 1) \
+ F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */
+
+
+#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
+ /* String and Regexp */ \
+ F(NumberToStringRT, 1, 1) \
+ F(RegExpConstructResult, 3, 1) \
+ F(RegExpExecRT, 4, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(InternalizeString, 1, 1) \
+ F(StringCompare, 2, 1) \
+ F(StringCharCodeAtRT, 2, 1) \
+ F(GetFromCache, 2, 1) \
+ \
+ /* Compilation */ \
+ F(CompileLazy, 1, 1) \
+ F(CompileOptimized, 2, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
+ F(NotifyDeoptimized, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
+ \
+ /* Utilities */ \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateHeapNumber, 0, 1) \
+ F(NumberToSmi, 1, 1) \
+ F(NumberToStringSkipCache, 1, 1) \
+ \
+ F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
+ F(NewSloppyArguments, 3, 1) \
+ F(NewStrictArguments, 3, 1) \
+ \
+ /* Harmony generators */ \
+ F(CreateJSGeneratorObject, 0, 1) \
+ F(SuspendJSGeneratorObject, 1, 1) \
+ F(ResumeJSGeneratorObject, 3, 1) \
+ F(ThrowGeneratorStateError, 1, 1) \
+ \
+ /* Arrays */ \
+ F(ArrayConstructor, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ \
+ /* Literals */ \
+ F(MaterializeRegExpLiteral, 4, 1) \
+ F(CreateObjectLiteral, 4, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1) \
+ \
+ /* Statements */ \
+ F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
+ F(NewObject, 1, 1) \
+ F(NewObjectWithAllocationSite, 2, 1) \
+ F(FinalizeInstanceSize, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowNotDateError, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(Interrupt, 0, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ \
+ /* Contexts */ \
+ F(NewGlobalContext, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(PushModuleContext, 2, 1) \
+ F(DeleteLookupSlot, 2, 1) \
+ F(StoreLookupSlot, 4, 1) \
+ \
+ /* Declarations and initialization */ \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareModules, 1, 1) \
+ F(DeclareLookupSlot, 4, 1) \
+ F(InitializeConstGlobal, 2, 1) \
+ F(InitializeLegacyConstLookupSlot, 3, 1) \
+ \
+ /* Maths */ \
+ F(MathPowSlow, 2, 1) \
+ F(MathPowRT, 2, 1)
+
+
+#define RUNTIME_FUNCTION_LIST_RETURN_PAIR(F) \
+ F(LoadLookupSlot, 2, 2) \
+ F(LoadLookupSlotNoReferenceError, 2, 2) \
+ F(ResolvePossiblyDirectEval, 5, 2) \
+ F(ForInInit, 2, 2) /* TODO(turbofan): Only temporary */ \
+ F(ForInNext, 4, 2) /* TODO(turbofan): Only temporary */
+
+
+#define RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
+ /* Debugger support*/ \
+ F(DebugBreak, 0, 1) \
+ F(SetDebugEventListener, 2, 1) \
+ F(Break, 0, 1) \
+ F(DebugGetPropertyDetails, 2, 1) \
+ F(DebugGetProperty, 2, 1) \
+ F(DebugPropertyTypeFromDetails, 1, 1) \
+ F(DebugPropertyAttributesFromDetails, 1, 1) \
+ F(DebugPropertyIndexFromDetails, 1, 1) \
+ F(DebugNamedInterceptorPropertyValue, 2, 1) \
+ F(DebugIndexedInterceptorElementValue, 2, 1) \
+ F(CheckExecutionState, 1, 1) \
+ F(GetFrameCount, 1, 1) \
+ F(GetFrameDetails, 2, 1) \
+ F(GetScopeCount, 2, 1) \
+ F(GetStepInPositions, 2, 1) \
+ F(GetScopeDetails, 4, 1) \
+ F(GetAllScopesDetails, 4, 1) \
+ F(GetFunctionScopeCount, 1, 1) \
+ F(GetFunctionScopeDetails, 2, 1) \
+ F(SetScopeVariableValue, 6, 1) \
+ F(DebugPrintScopes, 0, 1) \
+ F(GetThreadCount, 1, 1) \
+ F(GetThreadDetails, 2, 1) \
+ F(SetDisableBreak, 1, 1) \
+ F(GetBreakLocations, 2, 1) \
+ F(SetFunctionBreakPoint, 3, 1) \
+ F(SetScriptBreakPoint, 4, 1) \
+ F(ClearBreakPoint, 1, 1) \
+ F(ChangeBreakOnException, 2, 1) \
+ F(IsBreakOnException, 1, 1) \
+ F(PrepareStep, 4, 1) \
+ F(ClearStepping, 0, 1) \
+ F(DebugEvaluate, 6, 1) \
+ F(DebugEvaluateGlobal, 4, 1) \
+ F(DebugGetLoadedScripts, 0, 1) \
+ F(DebugReferencedBy, 3, 1) \
+ F(DebugConstructedBy, 2, 1) \
+ F(DebugGetPrototype, 1, 1) \
+ F(DebugSetScriptSource, 2, 1) \
+ F(DebugCallbackSupportsStepping, 1, 1) \
+ F(SystemBreak, 0, 1) \
+ F(DebugDisassembleFunction, 1, 1) \
+ F(DebugDisassembleConstructor, 1, 1) \
+ F(FunctionGetInferredName, 1, 1) \
+ F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
+ F(LiveEditGatherCompileInfo, 2, 1) \
+ F(LiveEditReplaceScript, 3, 1) \
+ F(LiveEditReplaceFunctionCode, 2, 1) \
+ F(LiveEditFunctionSourceUpdated, 1, 1) \
+ F(LiveEditFunctionSetScript, 2, 1) \
+ F(LiveEditReplaceRefToNestedFunction, 3, 1) \
+ F(LiveEditPatchFunctionPositions, 2, 1) \
+ F(LiveEditCheckAndDropActivations, 2, 1) \
+ F(LiveEditCompareStrings, 2, 1) \
+ F(LiveEditRestartFrame, 2, 1) \
+ F(GetFunctionCodePositionFromSource, 2, 1) \
+ F(ExecuteInDebugContext, 2, 1) \
+ \
+ F(SetFlags, 1, 1) \
+ F(CollectGarbage, 1, 1) \
+ F(GetHeapUsage, 0, 1)
+
+
+#ifdef V8_I18N_SUPPORT
+#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \
+ /* i18n support */ \
+ /* Standalone, helper methods. */ \
+ F(CanonicalizeLanguageTag, 1, 1) \
+ F(AvailableLocalesOf, 1, 1) \
+ F(GetDefaultICULocale, 0, 1) \
+ F(GetLanguageTagVariants, 1, 1) \
+ F(IsInitializedIntlObject, 1, 1) \
+ F(IsInitializedIntlObjectOfType, 2, 1) \
+ F(MarkAsInitializedIntlObjectOfType, 3, 1) \
+ F(GetImplFromInitializedIntlObject, 1, 1) \
+ \
+ /* Date format and parse. */ \
+ F(CreateDateTimeFormat, 3, 1) \
+ F(InternalDateFormat, 2, 1) \
+ F(InternalDateParse, 2, 1) \
+ \
+ /* Number format and parse. */ \
+ F(CreateNumberFormat, 3, 1) \
+ F(InternalNumberFormat, 2, 1) \
+ F(InternalNumberParse, 2, 1) \
+ \
+ /* Collator. */ \
+ F(CreateCollator, 3, 1) \
+ F(InternalCompare, 3, 1) \
+ \
+ /* String.prototype.normalize. */ \
+ F(StringNormalize, 2, 1) \
+ \
+ /* Break iterator. */ \
+ F(CreateBreakIterator, 3, 1) \
+ F(BreakIteratorAdoptText, 2, 1) \
+ F(BreakIteratorFirst, 1, 1) \
+ F(BreakIteratorNext, 1, 1) \
+ F(BreakIteratorCurrent, 1, 1) \
+ F(BreakIteratorBreakType, 1, 1)
+
+#else
+#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
+#endif
+
+
+#ifdef DEBUG
+#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
+ /* Testing */ \
+ F(ListNatives, 0, 1)
+#else
+#define RUNTIME_FUNCTION_LIST_DEBUG(F)
+#endif
+
+// ----------------------------------------------------------------------------
+// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
+// either directly by id (via the code generator), or indirectly
+// via a native call by name (from within JS code).
+// Entries have the form F(name, number of arguments, number of return values).
+
+#define RUNTIME_FUNCTION_LIST_RETURN_OBJECT(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
+ RUNTIME_FUNCTION_LIST_DEBUG(F) \
+ RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
+ RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
+
+
+#define RUNTIME_FUNCTION_LIST(F) \
+ RUNTIME_FUNCTION_LIST_RETURN_OBJECT(F) \
+ RUNTIME_FUNCTION_LIST_RETURN_PAIR(F)
+
+// ----------------------------------------------------------------------------
+// INLINE_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_FUNCTION_LIST(F) \
+ F(IsSmi, 1, 1) \
+ F(IsNonNegativeSmi, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(IsRegExp, 1, 1) \
+ F(IsConstructCall, 0, 1) \
+ F(CallFunction, -1 /* receiver + n args + function */, 1) \
+ F(ArgumentsLength, 0, 1) \
+ F(Arguments, 1, 1) \
+ F(ValueOf, 1, 1) \
+ F(SetValueOf, 2, 1) \
+ F(DateField, 2 /* date object, field index */, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringSetChar, 3, 1) \
+ F(ObjectEquals, 2, 1) \
+ F(IsObject, 1, 1) \
+ F(IsFunction, 1, 1) \
+ F(IsUndetectableObject, 1, 1) \
+ F(IsSpecObject, 1, 1) \
+ F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
+ F(MathPow, 2, 1) \
+ F(IsMinusZero, 1, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1) \
+ F(FastOneByteArrayJoin, 2, 1) \
+ F(GeneratorNext, 2, 1) \
+ F(GeneratorThrow, 2, 1) \
+ F(DebugBreakInOptimizedCode, 0, 1) \
+ F(ClassOf, 1, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringCompare, 2, 1) \
+ F(RegExpExec, 4, 1) \
+ F(RegExpConstructResult, 3, 1) \
+ F(GetFromCache, 2, 1) \
+ F(NumberToString, 1, 1) \
+ F(DebugIsActive, 0, 1)
+
+
+// ----------------------------------------------------------------------------
+// INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code that also have
+// a corresponding runtime function, that is called from non-optimized code.
+// For the benefit of (fuzz) tests, the runtime version can also be called
+// directly as %name (i.e. without the leading underscore).
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \
+ /* Typed Arrays */ \
+ F(TypedArrayInitialize, 5, 1) \
+ F(DataViewInitialize, 4, 1) \
+ F(MaxSmi, 0, 1) \
+ F(TypedArrayMaxSizeInHeap, 0, 1) \
+ F(ArrayBufferViewGetByteLength, 1, 1) \
+ F(ArrayBufferViewGetByteOffset, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
+ /* ArrayBuffer */ \
+ F(ArrayBufferGetByteLength, 1, 1) \
+ /* Maths */ \
+ F(ConstructDouble, 2, 1) \
+ F(DoubleHi, 1, 1) \
+ F(DoubleLo, 1, 1) \
+ F(MathSqrtRT, 1, 1) \
+ F(MathLogRT, 1, 1)
+
+
+//---------------------------------------------------------------------------
+// Runtime provides access to all C++ runtime functions.
+
+class RuntimeState {
+ public:
+ StaticResource<ConsStringIteratorOp>* string_iterator() {
+ return &string_iterator_;
+ }
+ unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
+ return &to_upper_mapping_;
+ }
+ unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
+ return &to_lower_mapping_;
+ }
+ ConsStringIteratorOp* string_iterator_compare_x() {
+ return &string_iterator_compare_x_;
+ }
+ ConsStringIteratorOp* string_iterator_compare_y() {
+ return &string_iterator_compare_y_;
+ }
+ ConsStringIteratorOp* string_locale_compare_it1() {
+ return &string_locale_compare_it1_;
+ }
+ ConsStringIteratorOp* string_locale_compare_it2() {
+ return &string_locale_compare_it2_;
+ }
+
+ private:
+ RuntimeState() {}
+ // Non-reentrant string buffer for efficient general use in the runtime.
+ StaticResource<ConsStringIteratorOp> string_iterator_;
+ unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
+ unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+ ConsStringIteratorOp string_iterator_compare_x_;
+ ConsStringIteratorOp string_iterator_compare_y_;
+ ConsStringIteratorOp string_locale_compare_it1_;
+ ConsStringIteratorOp string_locale_compare_it2_;
+
+ friend class Isolate;
+ friend class Runtime;
+
+ DISALLOW_COPY_AND_ASSIGN(RuntimeState);
+};
+
+
+class Runtime : public AllStatic {
+ public:
+ enum FunctionId {
+#define F(name, nargs, ressize) k##name,
+ RUNTIME_FUNCTION_LIST(F) INLINE_OPTIMIZED_FUNCTION_LIST(F)
+#undef F
+#define F(name, nargs, ressize) kInline##name,
+ INLINE_FUNCTION_LIST(F)
+#undef F
+#define F(name, nargs, ressize) kInlineOptimized##name,
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
+#undef F
+ kNumFunctions,
+ kFirstInlineFunction = kInlineIsSmi
+ };
+
+ enum IntrinsicType { RUNTIME, INLINE, INLINE_OPTIMIZED };
+
+ // Intrinsic function descriptor.
+ struct Function {
+ FunctionId function_id;
+ IntrinsicType intrinsic_type;
+ // The JS name of the function.
+ const char* name;
+
+ // The C++ (native) entry point. NULL if the function is inlined.
+ byte* entry;
+
+ // The number of arguments expected. nargs is -1 if the function takes
+ // a variable number of arguments.
+ int nargs;
+ // Size of result. Most functions return a single pointer, size 1.
+ int result_size;
+ };
+
+ static const int kNotFound = -1;
+
+ // Add internalized strings for all the intrinsic function names to a
+ // StringDictionary.
+ static void InitializeIntrinsicFunctionNames(Isolate* isolate,
+ Handle<NameDictionary> dict);
+
+ // Get the intrinsic function with the given name, which must be internalized.
+ static const Function* FunctionForName(Handle<String> name);
+
+ // Get the intrinsic function with the given FunctionId.
+ static const Function* FunctionForId(FunctionId id);
+
+ // Get the intrinsic function with the given function entry address.
+ static const Function* FunctionForEntry(Address ref);
+
+ // General-purpose helper functions for runtime system.
+ static int StringMatch(Isolate* isolate, Handle<String> sub,
+ Handle<String> pat, int index);
+
+ // TODO(1240886): Some of the following methods are *not* handle safe, but
+ // accept handle arguments. This seems fragile.
+
+ // Support getting the characters in a string using [] notation as
+ // in Firefox/SpiderMonkey, Safari and Opera.
+ MUST_USE_RESULT static MaybeHandle<Object> GetElementOrCharAt(
+ Isolate* isolate, Handle<Object> object, uint32_t index);
+
+ MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Object> key,
+ Handle<Object> value, StrictMode strict_mode);
+
+ MUST_USE_RESULT static MaybeHandle<Object> DefineObjectProperty(
+ Handle<JSObject> object, Handle<Object> key, Handle<Object> value,
+ PropertyAttributes attr);
+
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
+ JSReceiver::DeleteMode mode);
+
+ MUST_USE_RESULT static MaybeHandle<Object> HasObjectProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Object> key);
+
+ static void SetupArrayBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ bool is_external, void* data,
+ size_t allocated_length);
+
+ static bool SetupArrayBufferAllocatingData(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t allocated_length,
+ bool initialize = true);
+
+ static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
+
+ static void FreeArrayBuffer(Isolate* isolate,
+ JSArrayBuffer* phantom_array_buffer);
+
+ enum TypedArrayId {
+ // arrayIds below should be synchronized with typedarray.js natives.
+ ARRAY_ID_UINT8 = 1,
+ ARRAY_ID_INT8 = 2,
+ ARRAY_ID_UINT16 = 3,
+ ARRAY_ID_INT16 = 4,
+ ARRAY_ID_UINT32 = 5,
+ ARRAY_ID_INT32 = 6,
+ ARRAY_ID_FLOAT32 = 7,
+ ARRAY_ID_FLOAT64 = 8,
+ ARRAY_ID_UINT8_CLAMPED = 9,
+ ARRAY_ID_FIRST = ARRAY_ID_UINT8,
+ ARRAY_ID_LAST = ARRAY_ID_UINT8_CLAMPED
+ };
+
+ static void ArrayIdToTypeAndSize(int array_id, ExternalArrayType* type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t* element_size);
+
+ // Used in runtime.cc and hydrogen's VisitArrayLiteral.
+ MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
+ Isolate* isolate, Handle<FixedArray> literals,
+ Handle<FixedArray> elements);
+};
+
+
+//---------------------------------------------------------------------------
+// Constants used by interface to runtime functions.
+
+class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
+class AllocateTargetSpace : public BitField<AllocationSpace, 1, 3> {};
+
+class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
+class DeclareGlobalsNativeFlag : public BitField<bool, 1, 1> {};
+class DeclareGlobalsStrictMode : public BitField<StrictMode, 2, 1> {};
+}
+} // namespace v8::internal
+
+#endif // V8_RUNTIME_H_
diff --git a/deps/v8/src/runtime/string-builder.h b/deps/v8/src/runtime/string-builder.h
new file mode 100644
index 0000000000..37ff7b7018
--- /dev/null
+++ b/deps/v8/src/runtime/string-builder.h
@@ -0,0 +1,296 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRING_BUILDER_H_
+#define V8_STRING_BUILDER_H_
+
+namespace v8 {
+namespace internal {
+
+const int kStringBuilderConcatHelperLengthBits = 11;
+const int kStringBuilderConcatHelperPositionBits = 19;
+
+typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
+ StringBuilderSubstringLength;
+typedef BitField<int, kStringBuilderConcatHelperLengthBits,
+ kStringBuilderConcatHelperPositionBits>
+ StringBuilderSubstringPosition;
+
+
+template <typename sinkchar>
+static inline void StringBuilderConcatHelper(String* special, sinkchar* sink,
+ FixedArray* fixed_array,
+ int array_length) {
+ DisallowHeapAllocation no_gc;
+ int position = 0;
+ for (int i = 0; i < array_length; i++) {
+ Object* element = fixed_array->get(i);
+ if (element->IsSmi()) {
+ // Smi encoding of position and length.
+ int encoded_slice = Smi::cast(element)->value();
+ int pos;
+ int len;
+ if (encoded_slice > 0) {
+ // Position and length encoded in one smi.
+ pos = StringBuilderSubstringPosition::decode(encoded_slice);
+ len = StringBuilderSubstringLength::decode(encoded_slice);
+ } else {
+ // Position and length encoded in two smis.
+ Object* obj = fixed_array->get(++i);
+ DCHECK(obj->IsSmi());
+ pos = Smi::cast(obj)->value();
+ len = -encoded_slice;
+ }
+ String::WriteToFlat(special, sink + position, pos, pos + len);
+ position += len;
+ } else {
+ String* string = String::cast(element);
+ int element_length = string->length();
+ String::WriteToFlat(string, sink + position, 0, element_length);
+ position += element_length;
+ }
+ }
+}
+
+
+// Returns the result length of the concatenation.
+// On illegal argument, -1 is returned.
+static inline int StringBuilderConcatLength(int special_length,
+ FixedArray* fixed_array,
+ int array_length, bool* one_byte) {
+ DisallowHeapAllocation no_gc;
+ int position = 0;
+ for (int i = 0; i < array_length; i++) {
+ int increment = 0;
+ Object* elt = fixed_array->get(i);
+ if (elt->IsSmi()) {
+ // Smi encoding of position and length.
+ int smi_value = Smi::cast(elt)->value();
+ int pos;
+ int len;
+ if (smi_value > 0) {
+ // Position and length encoded in one smi.
+ pos = StringBuilderSubstringPosition::decode(smi_value);
+ len = StringBuilderSubstringLength::decode(smi_value);
+ } else {
+ // Position and length encoded in two smis.
+ len = -smi_value;
+ // Get the position and check that it is a positive smi.
+ i++;
+ if (i >= array_length) return -1;
+ Object* next_smi = fixed_array->get(i);
+ if (!next_smi->IsSmi()) return -1;
+ pos = Smi::cast(next_smi)->value();
+ if (pos < 0) return -1;
+ }
+ DCHECK(pos >= 0);
+ DCHECK(len >= 0);
+ if (pos > special_length || len > special_length - pos) return -1;
+ increment = len;
+ } else if (elt->IsString()) {
+ String* element = String::cast(elt);
+ int element_length = element->length();
+ increment = element_length;
+ if (*one_byte && !element->HasOnlyOneByteChars()) {
+ *one_byte = false;
+ }
+ } else {
+ return -1;
+ }
+ if (increment > String::kMaxLength - position) {
+ return kMaxInt; // Provoke throw on allocation.
+ }
+ position += increment;
+ }
+ return position;
+}
+
+
+class FixedArrayBuilder {
+ public:
+ explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
+ : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
+ length_(0),
+ has_non_smi_elements_(false) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ DCHECK(initial_capacity > 0);
+ }
+
+ explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
+ : array_(backing_store), length_(0), has_non_smi_elements_(false) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ DCHECK(backing_store->length() > 0);
+ }
+
+ bool HasCapacity(int elements) {
+ int length = array_->length();
+ int required_length = length_ + elements;
+ return (length >= required_length);
+ }
+
+ void EnsureCapacity(int elements) {
+ int length = array_->length();
+ int required_length = length_ + elements;
+ if (length < required_length) {
+ int new_length = length;
+ do {
+ new_length *= 2;
+ } while (new_length < required_length);
+ Handle<FixedArray> extended_array =
+ array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
+ array_->CopyTo(0, *extended_array, 0, length_);
+ array_ = extended_array;
+ }
+ }
+
+ void Add(Object* value) {
+ DCHECK(!value->IsSmi());
+ DCHECK(length_ < capacity());
+ array_->set(length_, value);
+ length_++;
+ has_non_smi_elements_ = true;
+ }
+
+ void Add(Smi* value) {
+ DCHECK(value->IsSmi());
+ DCHECK(length_ < capacity());
+ array_->set(length_, value);
+ length_++;
+ }
+
+ Handle<FixedArray> array() { return array_; }
+
+ int length() { return length_; }
+
+ int capacity() { return array_->length(); }
+
+ Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
+ JSArray::SetContent(target_array, array_);
+ target_array->set_length(Smi::FromInt(length_));
+ return target_array;
+ }
+
+
+ private:
+ Handle<FixedArray> array_;
+ int length_;
+ bool has_non_smi_elements_;
+};
+
+
+class ReplacementStringBuilder {
+ public:
+ ReplacementStringBuilder(Heap* heap, Handle<String> subject,
+ int estimated_part_count)
+ : heap_(heap),
+ array_builder_(heap->isolate(), estimated_part_count),
+ subject_(subject),
+ character_count_(0),
+ is_one_byte_(subject->IsOneByteRepresentation()) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ DCHECK(estimated_part_count > 0);
+ }
+
+ static inline void AddSubjectSlice(FixedArrayBuilder* builder, int from,
+ int to) {
+ DCHECK(from >= 0);
+ int length = to - from;
+ DCHECK(length > 0);
+ if (StringBuilderSubstringLength::is_valid(length) &&
+ StringBuilderSubstringPosition::is_valid(from)) {
+ int encoded_slice = StringBuilderSubstringLength::encode(length) |
+ StringBuilderSubstringPosition::encode(from);
+ builder->Add(Smi::FromInt(encoded_slice));
+ } else {
+ // Otherwise encode as two smis.
+ builder->Add(Smi::FromInt(-length));
+ builder->Add(Smi::FromInt(from));
+ }
+ }
+
+
+ void EnsureCapacity(int elements) { array_builder_.EnsureCapacity(elements); }
+
+
+ void AddSubjectSlice(int from, int to) {
+ AddSubjectSlice(&array_builder_, from, to);
+ IncrementCharacterCount(to - from);
+ }
+
+
+ void AddString(Handle<String> string) {
+ int length = string->length();
+ DCHECK(length > 0);
+ AddElement(*string);
+ if (!string->IsOneByteRepresentation()) {
+ is_one_byte_ = false;
+ }
+ IncrementCharacterCount(length);
+ }
+
+
+ MaybeHandle<String> ToString() {
+ Isolate* isolate = heap_->isolate();
+ if (array_builder_.length() == 0) {
+ return isolate->factory()->empty_string();
+ }
+
+ Handle<String> joined_string;
+ if (is_one_byte_) {
+ Handle<SeqOneByteString> seq;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, seq,
+ isolate->factory()->NewRawOneByteString(character_count_), String);
+
+ DisallowHeapAllocation no_gc;
+ uint8_t* char_buffer = seq->GetChars();
+ StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
+ array_builder_.length());
+ joined_string = Handle<String>::cast(seq);
+ } else {
+ // Two-byte.
+ Handle<SeqTwoByteString> seq;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, seq,
+ isolate->factory()->NewRawTwoByteString(character_count_), String);
+
+ DisallowHeapAllocation no_gc;
+ uc16* char_buffer = seq->GetChars();
+ StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
+ array_builder_.length());
+ joined_string = Handle<String>::cast(seq);
+ }
+ return joined_string;
+ }
+
+
+ void IncrementCharacterCount(int by) {
+ if (character_count_ > String::kMaxLength - by) {
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ character_count_ = kMaxInt;
+ } else {
+ character_count_ += by;
+ }
+ }
+
+ private:
+ void AddElement(Object* element) {
+ DCHECK(element->IsSmi() || element->IsString());
+ DCHECK(array_builder_.capacity() > array_builder_.length());
+ array_builder_.Add(element);
+ }
+
+ Heap* heap_;
+ FixedArrayBuilder array_builder_;
+ Handle<String> subject_;
+ int character_count_;
+ bool is_one_byte_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_STRING_BUILDER_H_
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index d00b751a05..89500e2047 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -44,7 +44,7 @@ SafepointTable::SafepointTable(Code* code) {
entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
entries_ = pc_and_deoptimization_indexes_ +
- (length_ * kPcAndDeoptimizationIndexSize);
+ (length_ * kPcAndDeoptimizationIndexSize);
DCHECK(entry_size_ > 0);
STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
Safepoint::kNoDeoptimizationIndex);
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 0bdd43104d..5fbfe419db 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -84,7 +84,8 @@ class SafepointTable BASE_EMBEDDED {
int size() const {
return kHeaderSize +
- (length_ * (kPcAndDeoptimizationIndexSize + entry_size_)); }
+ (length_ * (kPcAndDeoptimizationIndexSize + entry_size_));
+ }
unsigned length() const { return length_; }
unsigned entry_size() const { return entry_size_; }
@@ -165,8 +166,8 @@ class Safepoint BASE_EMBEDDED {
void DefinePointerRegister(Register reg, Zone* zone);
private:
- Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
- indexes_(indexes), registers_(registers) { }
+ Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers)
+ : indexes_(indexes), registers_(registers) {}
ZoneList<int>* indexes_;
ZoneList<int>* registers_;
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 413b6be906..394efeb764 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -13,7 +13,7 @@
#include <signal.h>
#include <sys/time.h>
-#if !V8_OS_QNX
+#if !V8_OS_QNX && !V8_OS_NACL
#include <sys/syscall.h> // NOLINT
#endif
@@ -21,8 +21,8 @@
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) \
- && !V8_OS_OPENBSD
+#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
+ !V8_OS_OPENBSD && !V8_OS_NACL
#include <ucontext.h>
#endif
@@ -276,7 +276,7 @@ class SimulatorHelper {
class SignalHandler : public AllStatic {
public:
static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
- static void TearDown() { delete mutex_; }
+ static void TearDown() { delete mutex_; mutex_ = NULL; }
static void IncreaseSamplerCount() {
base::LockGuard<base::Mutex> lock_guard(mutex_);
@@ -294,6 +294,7 @@ class SignalHandler : public AllStatic {
private:
static void Install() {
+#if !V8_OS_NACL
struct sigaction sa;
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
@@ -304,16 +305,21 @@ class SignalHandler : public AllStatic {
#endif
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+#endif
}
static void Restore() {
+#if !V8_OS_NACL
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
+#endif
}
+#if !V8_OS_NACL
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+#endif
// Protects the process wide state below.
static base::Mutex* mutex_;
static int client_count_;
@@ -328,13 +334,10 @@ struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
+// As Native Client does not support signal handling, profiling is disabled.
+#if !V8_OS_NACL
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void* context) {
-#if V8_OS_NACL
- // As Native Client does not support signal handling, profiling
- // is disabled.
- return;
-#else
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UnsafeCurrent();
@@ -477,8 +480,8 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#endif // V8_OS_QNX
#endif // USE_SIMULATOR
sampler->SampleStack(state);
-#endif // V8_OS_NACL
}
+#endif // V8_OS_NACL
#endif
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index 9ec0ad1008..d06f479f94 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -6,12 +6,40 @@
#include "src/scanner-character-streams.h"
+#include "include/v8.h"
#include "src/handles.h"
#include "src/unicode-inl.h"
namespace v8 {
namespace internal {
+namespace {
+
+unsigned CopyCharsHelper(uint16_t* dest, unsigned length, const uint8_t* src,
+ unsigned* src_pos, unsigned src_length,
+ ScriptCompiler::StreamedSource::Encoding encoding) {
+ if (encoding == ScriptCompiler::StreamedSource::UTF8) {
+ return v8::internal::Utf8ToUtf16CharacterStream::CopyChars(
+ dest, length, src, src_pos, src_length);
+ }
+
+ unsigned to_fill = length;
+ if (to_fill > src_length - *src_pos) to_fill = src_length - *src_pos;
+
+ if (encoding == ScriptCompiler::StreamedSource::ONE_BYTE) {
+ v8::internal::CopyChars<uint8_t, uint16_t>(dest, src + *src_pos, to_fill);
+ } else {
+ DCHECK(encoding == ScriptCompiler::StreamedSource::TWO_BYTE);
+ v8::internal::CopyChars<uint16_t, uint16_t>(
+ dest, reinterpret_cast<const uint16_t*>(src + *src_pos), to_fill);
+ }
+ *src_pos += to_fill;
+ return to_fill;
+}
+
+} // namespace
+
+
// ----------------------------------------------------------------------------
// BufferedUtf16CharacterStreams
@@ -145,6 +173,35 @@ Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
+unsigned Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, unsigned length,
+ const byte* src,
+ unsigned* src_pos,
+ unsigned src_length) {
+ static const unibrow::uchar kMaxUtf16Character = 0xffff;
+ unsigned i = 0;
+ // Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
+ // one character early (in the normal case), because we need to have at least
+ // two free spaces in the buffer to be sure that the next character will fit.
+ while (i < length - 1) {
+ if (*src_pos == src_length) break;
+ unibrow::uchar c = src[*src_pos];
+ if (c <= unibrow::Utf8::kMaxOneByteChar) {
+ *src_pos = *src_pos + 1;
+ } else {
+ c = unibrow::Utf8::CalculateValue(src + *src_pos, src_length - *src_pos,
+ src_pos);
+ }
+ if (c > kMaxUtf16Character) {
+ dest[i++] = unibrow::Utf16::LeadSurrogate(c);
+ dest[i++] = unibrow::Utf16::TrailSurrogate(c);
+ } else {
+ dest[i++] = static_cast<uc16>(c);
+ }
+ }
+ return i;
+}
+
+
unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
unsigned old_pos = pos_;
unsigned target_pos = pos_ + delta;
@@ -156,31 +213,14 @@ unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position) {
- static const unibrow::uchar kMaxUtf16Character = 0xffff;
SetRawPosition(char_position);
if (raw_character_position_ != char_position) {
// char_position was not a valid position in the stream (hit the end
// while spooling to it).
return 0u;
}
- unsigned i = 0;
- while (i < kBufferSize - 1) {
- if (raw_data_pos_ == raw_data_length_) break;
- unibrow::uchar c = raw_data_[raw_data_pos_];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- raw_data_pos_++;
- } else {
- c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
- raw_data_length_ - raw_data_pos_,
- &raw_data_pos_);
- }
- if (c > kMaxUtf16Character) {
- buffer_[i++] = unibrow::Utf16::LeadSurrogate(c);
- buffer_[i++] = unibrow::Utf16::TrailSurrogate(c);
- } else {
- buffer_[i++] = static_cast<uc16>(c);
- }
- }
+ unsigned i = CopyChars(buffer_, kBufferSize, raw_data_, &raw_data_pos_,
+ raw_data_length_);
raw_character_position_ = char_position + i;
return i;
}
@@ -276,6 +316,118 @@ void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
}
+unsigned ExternalStreamingStream::FillBuffer(unsigned position) {
+ // Ignore "position" which is the position in the decoded data. Instead,
+ // ExternalStreamingStream keeps track of the position in the raw data.
+ unsigned data_in_buffer = 0;
+ // Note that the UTF-8 decoder might not be able to fill the buffer
+ // completely; it will typically leave the last character empty (see
+ // Utf8ToUtf16CharacterStream::CopyChars).
+ while (data_in_buffer < kBufferSize - 1) {
+ if (current_data_ == NULL) {
+ // GetSomeData will wait until the embedder has enough data. Here's an
+ // interface between the API which uses size_t (which is the correct type
+ // here) and the internal parts which use unsigned. TODO(marja): make the
+ // internal parts use size_t too.
+ current_data_length_ =
+ static_cast<unsigned>(source_stream_->GetMoreData(&current_data_));
+ current_data_offset_ = 0;
+ bool data_ends = current_data_length_ == 0;
+
+ // A caveat: a data chunk might end with bytes from an incomplete UTF-8
+ // character (the rest of the bytes will be in the next chunk).
+ if (encoding_ == ScriptCompiler::StreamedSource::UTF8) {
+ HandleUtf8SplitCharacters(&data_in_buffer);
+ if (!data_ends && current_data_offset_ == current_data_length_) {
+ // The data stream didn't end, but we used all the data in the
+ // chunk. This will only happen when the chunk was really small. We
+ // don't handle the case where a UTF-8 character is split over several
+ // chunks; in that case V8 won't crash, but it will be a parse error.
+ delete[] current_data_;
+ current_data_ = NULL;
+ current_data_length_ = 0;
+ current_data_offset_ = 0;
+ continue; // Request a new chunk.
+ }
+ }
+
+ // Did the data stream end?
+ if (data_ends) {
+ DCHECK(utf8_split_char_buffer_length_ == 0);
+ return data_in_buffer;
+ }
+ }
+
+ // Fill the buffer from current_data_.
+ unsigned new_offset = 0;
+ unsigned new_chars_in_buffer =
+ CopyCharsHelper(buffer_ + data_in_buffer, kBufferSize - data_in_buffer,
+ current_data_ + current_data_offset_, &new_offset,
+ current_data_length_ - current_data_offset_, encoding_);
+ data_in_buffer += new_chars_in_buffer;
+ current_data_offset_ += new_offset;
+ DCHECK(data_in_buffer <= kBufferSize);
+
+ // Did we use all the data in the data chunk?
+ if (current_data_offset_ == current_data_length_) {
+ delete[] current_data_;
+ current_data_ = NULL;
+ current_data_length_ = 0;
+ current_data_offset_ = 0;
+ }
+ }
+ return data_in_buffer;
+}
+
+void ExternalStreamingStream::HandleUtf8SplitCharacters(
+ unsigned* data_in_buffer) {
+ // First check if we have leftover data from the last chunk.
+ unibrow::uchar c;
+ if (utf8_split_char_buffer_length_ > 0) {
+ // Move the bytes which are part of the split character (which started in
+ // the previous chunk) into utf8_split_char_buffer_.
+ while (current_data_offset_ < current_data_length_ &&
+ utf8_split_char_buffer_length_ < 4 &&
+ (c = current_data_[current_data_offset_]) >
+ unibrow::Utf8::kMaxOneByteChar) {
+ utf8_split_char_buffer_[utf8_split_char_buffer_length_] = c;
+ ++utf8_split_char_buffer_length_;
+ ++current_data_offset_;
+ }
+
+ // Convert the data in utf8_split_char_buffer_.
+ unsigned new_offset = 0;
+ unsigned new_chars_in_buffer =
+ CopyCharsHelper(buffer_ + *data_in_buffer,
+ kBufferSize - *data_in_buffer, utf8_split_char_buffer_,
+ &new_offset, utf8_split_char_buffer_length_, encoding_);
+ *data_in_buffer += new_chars_in_buffer;
+ // Make sure we used all the data.
+ DCHECK(new_offset == utf8_split_char_buffer_length_);
+ DCHECK(*data_in_buffer <= kBufferSize);
+
+ utf8_split_char_buffer_length_ = 0;
+ }
+
+ // Move bytes which are part of an incomplete character from the end of the
+ // current chunk to utf8_split_char_buffer_. They will be converted when the
+ // next data chunk arrives. Note that all valid UTF-8 characters are at most 4
+ // bytes long, but if the data is invalid, we can have character values bigger
+ // than unibrow::Utf8::kMaxOneByteChar for more than 4 consecutive bytes.
+ while (current_data_length_ > current_data_offset_ &&
+ (c = current_data_[current_data_length_ - 1]) >
+ unibrow::Utf8::kMaxOneByteChar &&
+ utf8_split_char_buffer_length_ < 4) {
+ --current_data_length_;
+ ++utf8_split_char_buffer_length_;
+ }
+ CHECK(utf8_split_char_buffer_length_ <= 4);
+ for (unsigned i = 0; i < utf8_split_char_buffer_length_; ++i) {
+ utf8_split_char_buffer_[i] = current_data_[current_data_length_ + i];
+ }
+}
+
+
// ----------------------------------------------------------------------------
// ExternalTwoByteStringUtf16CharacterStream
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/scanner-character-streams.h
index eeb40e260f..afca13f180 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/scanner-character-streams.h
@@ -59,6 +59,9 @@ class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
virtual ~Utf8ToUtf16CharacterStream();
+ static unsigned CopyChars(uint16_t* dest, unsigned length, const byte* src,
+ unsigned* src_pos, unsigned src_length);
+
protected:
virtual unsigned BufferSeekForward(unsigned delta);
virtual unsigned FillBuffer(unsigned char_position);
@@ -73,6 +76,46 @@ class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
};
+// ExternalStreamingStream is a wrapper around an ExternalSourceStream (see
+// include/v8.h) subclass implemented by the embedder.
+class ExternalStreamingStream : public BufferedUtf16CharacterStream {
+ public:
+ ExternalStreamingStream(ScriptCompiler::ExternalSourceStream* source_stream,
+ v8::ScriptCompiler::StreamedSource::Encoding encoding)
+ : source_stream_(source_stream),
+ encoding_(encoding),
+ current_data_(NULL),
+ current_data_offset_(0),
+ current_data_length_(0),
+ utf8_split_char_buffer_length_(0) {}
+
+ virtual ~ExternalStreamingStream() { delete[] current_data_; }
+
+ virtual unsigned BufferSeekForward(unsigned delta) OVERRIDE {
+ // We never need to seek forward when streaming scripts. We only seek
+ // forward when we want to parse a function whose location we already know,
+ // and when streaming, we don't know the locations of anything we haven't
+ // seen yet.
+ UNREACHABLE();
+ return 0;
+ }
+
+ virtual unsigned FillBuffer(unsigned position);
+
+ private:
+ void HandleUtf8SplitCharacters(unsigned* data_in_buffer);
+
+ ScriptCompiler::ExternalSourceStream* source_stream_;
+ v8::ScriptCompiler::StreamedSource::Encoding encoding_;
+ const uint8_t* current_data_;
+ unsigned current_data_offset_;
+ unsigned current_data_length_;
+ // For converting UTF-8 characters which are split across two data chunks.
+ uint8_t utf8_split_char_buffer_[4];
+ unsigned utf8_split_char_buffer_length_;
+};
+
+
// UTF16 buffer to read characters from an external string.
class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
public:
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 2e8e24b06f..72874aacae 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -36,7 +36,8 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
octal_pos_(Location::invalid()),
harmony_scoping_(false),
harmony_modules_(false),
- harmony_numeric_literals_(false) { }
+ harmony_numeric_literals_(false),
+ harmony_classes_(false) { }
void Scanner::Initialize(Utf16CharacterStream* source) {
@@ -329,9 +330,9 @@ void Scanner::TryToParseSourceURLComment() {
if (!name.is_one_byte()) return;
Vector<const uint8_t> name_literal = name.one_byte_literal();
LiteralBuffer* value;
- if (name_literal == STATIC_ASCII_VECTOR("sourceURL")) {
+ if (name_literal == STATIC_CHAR_VECTOR("sourceURL")) {
value = &source_url_;
- } else if (name_literal == STATIC_ASCII_VECTOR("sourceMappingURL")) {
+ } else if (name_literal == STATIC_CHAR_VECTOR("sourceMappingURL")) {
value = &source_mapping_url_;
} else {
return;
@@ -901,76 +902,81 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
// ----------------------------------------------------------------------------
// Keyword Matcher
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
- KEYWORD_GROUP('b') \
- KEYWORD("break", Token::BREAK) \
- KEYWORD_GROUP('c') \
- KEYWORD("case", Token::CASE) \
- KEYWORD("catch", Token::CATCH) \
- KEYWORD("class", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("const", Token::CONST) \
- KEYWORD("continue", Token::CONTINUE) \
- KEYWORD_GROUP('d') \
- KEYWORD("debugger", Token::DEBUGGER) \
- KEYWORD("default", Token::DEFAULT) \
- KEYWORD("delete", Token::DELETE) \
- KEYWORD("do", Token::DO) \
- KEYWORD_GROUP('e') \
- KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("export", harmony_modules \
- ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("extends", Token::FUTURE_RESERVED_WORD) \
- KEYWORD_GROUP('f') \
- KEYWORD("false", Token::FALSE_LITERAL) \
- KEYWORD("finally", Token::FINALLY) \
- KEYWORD("for", Token::FOR) \
- KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('i') \
- KEYWORD("if", Token::IF) \
- KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("import", harmony_modules \
- ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("in", Token::IN) \
- KEYWORD("instanceof", Token::INSTANCEOF) \
- KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('l') \
- KEYWORD("let", harmony_scoping \
- ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('n') \
- KEYWORD("new", Token::NEW) \
- KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('p') \
- KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('r') \
- KEYWORD("return", Token::RETURN) \
- KEYWORD_GROUP('s') \
- KEYWORD("static", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("super", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("switch", Token::SWITCH) \
- KEYWORD_GROUP('t') \
- KEYWORD("this", Token::THIS) \
- KEYWORD("throw", Token::THROW) \
- KEYWORD("true", Token::TRUE_LITERAL) \
- KEYWORD("try", Token::TRY) \
- KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('v') \
- KEYWORD("var", Token::VAR) \
- KEYWORD("void", Token::VOID) \
- KEYWORD_GROUP('w') \
- KEYWORD("while", Token::WHILE) \
- KEYWORD("with", Token::WITH) \
- KEYWORD_GROUP('y') \
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
+ KEYWORD_GROUP('b') \
+ KEYWORD("break", Token::BREAK) \
+ KEYWORD_GROUP('c') \
+ KEYWORD("case", Token::CASE) \
+ KEYWORD("catch", Token::CATCH) \
+ KEYWORD("class", \
+ harmony_classes ? Token::CLASS : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("const", Token::CONST) \
+ KEYWORD("continue", Token::CONTINUE) \
+ KEYWORD_GROUP('d') \
+ KEYWORD("debugger", Token::DEBUGGER) \
+ KEYWORD("default", Token::DEFAULT) \
+ KEYWORD("delete", Token::DELETE) \
+ KEYWORD("do", Token::DO) \
+ KEYWORD_GROUP('e') \
+ KEYWORD("else", Token::ELSE) \
+ KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("export", \
+ harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("extends", \
+ harmony_classes ? Token::EXTENDS : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD_GROUP('f') \
+ KEYWORD("false", Token::FALSE_LITERAL) \
+ KEYWORD("finally", Token::FINALLY) \
+ KEYWORD("for", Token::FOR) \
+ KEYWORD("function", Token::FUNCTION) \
+ KEYWORD_GROUP('i') \
+ KEYWORD("if", Token::IF) \
+ KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("import", \
+ harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("in", Token::IN) \
+ KEYWORD("instanceof", Token::INSTANCEOF) \
+ KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('l') \
+ KEYWORD("let", \
+ harmony_scoping ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('n') \
+ KEYWORD("new", Token::NEW) \
+ KEYWORD("null", Token::NULL_LITERAL) \
+ KEYWORD_GROUP('p') \
+ KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('r') \
+ KEYWORD("return", Token::RETURN) \
+ KEYWORD_GROUP('s') \
+ KEYWORD("static", harmony_classes ? Token::STATIC \
+ : Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("super", \
+ harmony_classes ? Token::SUPER : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("switch", Token::SWITCH) \
+ KEYWORD_GROUP('t') \
+ KEYWORD("this", Token::THIS) \
+ KEYWORD("throw", Token::THROW) \
+ KEYWORD("true", Token::TRUE_LITERAL) \
+ KEYWORD("try", Token::TRY) \
+ KEYWORD("typeof", Token::TYPEOF) \
+ KEYWORD_GROUP('v') \
+ KEYWORD("var", Token::VAR) \
+ KEYWORD("void", Token::VOID) \
+ KEYWORD_GROUP('w') \
+ KEYWORD("while", Token::WHILE) \
+ KEYWORD("with", Token::WITH) \
+ KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD)
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length,
bool harmony_scoping,
- bool harmony_modules) {
+ bool harmony_modules,
+ bool harmony_classes) {
DCHECK(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -1014,7 +1020,8 @@ bool Scanner::IdentifierIsFutureStrictReserved(
return string->is_one_byte() &&
Token::FUTURE_STRICT_RESERVED_WORD ==
KeywordOrIdentifierToken(string->raw_data(), string->length(),
- harmony_scoping_, harmony_modules_);
+ harmony_scoping_, harmony_modules_,
+ harmony_classes_);
}
@@ -1057,7 +1064,8 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
harmony_scoping_,
- harmony_modules_);
+ harmony_modules_,
+ harmony_classes_);
}
return Token::IDENTIFIER;
@@ -1300,7 +1308,7 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
// Primitive hash function, almost identical to the one used
- // for strings (except that it's seeded by the length and ASCII-ness).
+ // for strings (except that it's seeded by the length and representation).
int length = key.length();
uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0) ;
for (int i = 0; i < length; i++) {
@@ -1314,10 +1322,10 @@ uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
bool DuplicateFinder::Match(void* first, void* second) {
// Decode lengths.
- // Length + ASCII-bit is encoded as base 128, most significant heptet first,
- // with a 8th bit being non-zero while there are more heptets.
+ // Length + representation is encoded as base 128, most significant heptet
+ // first, with a 8th bit being non-zero while there are more heptets.
// The value encodes the number of bytes following, and whether the original
- // was ASCII.
+ // was Latin1.
byte* s1 = reinterpret_cast<byte*>(first);
byte* s2 = reinterpret_cast<byte*>(second);
uint32_t length_one_byte_field = 0;
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index d3a6c6b22e..356c8e4a54 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -152,7 +152,7 @@ class DuplicateFinder {
int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
// Add a a number literal by converting it (if necessary)
// to the string that ToString(ToNumber(literal)) would generate.
- // and then adding that string with AddAsciiSymbol.
+ // and then adding that string with AddOneByteSymbol.
// This string is the actual value used as key in an object literal,
// and the one that must be different from the other keys.
int AddNumber(Vector<const uint8_t> key, int value);
@@ -166,7 +166,7 @@ class DuplicateFinder {
uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
// Compare two encoded keys (both pointing into the backing store)
- // for having the same base-127 encoded lengths and ASCII-ness,
+ // for having the same base-127 encoded lengths and representation.
// and then having the same 'length' bytes following.
static bool Match(void* first, void* second);
// Creates a hash from a sequence of bytes.
@@ -438,6 +438,12 @@ class Scanner {
void SetHarmonyNumericLiterals(bool numeric_literals) {
harmony_numeric_literals_ = numeric_literals;
}
+ bool HarmonyClasses() const {
+ return harmony_classes_;
+ }
+ void SetHarmonyClasses(bool classes) {
+ harmony_classes_ = classes;
+ }
// Returns true if there was a line terminator before the peek'ed token,
// possibly inside a multi-line comment.
@@ -647,6 +653,8 @@ class Scanner {
bool harmony_modules_;
// Whether we scan 0o777 and 0b111 as numbers.
bool harmony_numeric_literals_;
+ // Whether we scan 'class', 'extends', 'static' and 'super' as keywords.
+ bool harmony_classes_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 6aed7252a1..75bf014358 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -54,10 +54,12 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
- CallsEvalField::encode(scope->calls_eval()) |
- StrictModeField::encode(scope->strict_mode()) |
- FunctionVariableField::encode(function_name_info) |
- FunctionVariableMode::encode(function_variable_mode);
+ CallsEvalField::encode(scope->calls_eval()) |
+ StrictModeField::encode(scope->strict_mode()) |
+ FunctionVariableField::encode(function_name_info) |
+ FunctionVariableMode::encode(function_variable_mode) |
+ AsmModuleField::encode(scope->asm_module()) |
+ AsmFunctionField::encode(scope->asm_function());
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index e810d98800..440c7f280b 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -160,6 +160,8 @@ void Scope::SetDefaults(ScopeType scope_type,
scope_inside_with_ = false;
scope_contains_with_ = false;
scope_calls_eval_ = false;
+ asm_module_ = false;
+ asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
// Inherit the strict mode from the parent scope.
strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
outer_scope_calls_sloppy_eval_ = false;
@@ -222,6 +224,8 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
Handle<ScopeInfo>(scope_info),
global_scope->ast_value_factory_,
zone);
+ if (scope_info->IsAsmFunction()) current_scope->asm_function_ = true;
+ if (scope_info->IsAsmModule()) current_scope->asm_module_ = true;
} else if (context->IsBlockContext()) {
ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
current_scope = new(zone) Scope(current_scope,
@@ -267,9 +271,8 @@ bool Scope::Analyze(CompilationInfo* info) {
// Allocate the variables.
{
- // Passing NULL as AstValueFactory is ok, because AllocateVariables doesn't
- // need to create new strings or values.
- AstNodeFactory<AstNullVisitor> ast_node_factory(info->zone(), NULL);
+ AstNodeFactory<AstNullVisitor> ast_node_factory(
+ info->zone(), info->ast_value_factory(), info->ast_node_id_gen());
if (!top->AllocateVariables(info, &ast_node_factory)) return false;
}
@@ -1081,9 +1084,10 @@ bool Scope::ResolveVariable(CompilationInfo* info,
Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(0);
- Handle<Object> result =
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error =
factory->NewSyntaxError("harmony_const_assign", array);
- isolate->Throw(*result, &location);
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error, &location);
return false;
}
@@ -1115,9 +1119,10 @@ bool Scope::ResolveVariable(CompilationInfo* info,
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
JSObject::SetElement(array, 0, var->name(), NONE, STRICT).Assert();
- Handle<Object> result =
+ Handle<Object> error;
+ MaybeHandle<Object> maybe_error =
factory->NewSyntaxError("module_type_error", array);
- isolate->Throw(*result, &location);
+ if (maybe_error.ToHandle(&error)) isolate->Throw(*error, &location);
return false;
}
}
@@ -1164,6 +1169,9 @@ void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
if (inner->force_eager_compilation_) {
force_eager_compilation_ = true;
}
+ if (asm_module_ && inner->scope_type() == FUNCTION_SCOPE) {
+ inner->asm_function_ = true;
+ }
}
}
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 2757bf2402..06c6c9995f 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -214,6 +214,9 @@ class Scope: public ZoneObject {
// Set the strict mode flag (unless disabled by a global flag).
void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+ // Set the ASM module flag.
+ void SetAsmModule() { asm_module_ = true; }
+
// Position in the source where this scope begins and ends.
//
// * For the scope of a with statement
@@ -281,6 +284,8 @@ class Scope: public ZoneObject {
bool outer_scope_calls_sloppy_eval() const {
return outer_scope_calls_sloppy_eval_;
}
+ bool asm_module() const { return asm_module_; }
+ bool asm_function() const { return asm_function_; }
// Is this scope inside a with statement.
bool inside_with() const { return scope_inside_with_; }
@@ -463,6 +468,10 @@ class Scope: public ZoneObject {
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;
+ // This scope contains an "use asm" annotation.
+ bool asm_module_;
+ // This scope's outer context is an asm module.
+ bool asm_function_;
// The strict mode of this scope.
StrictMode strict_mode_;
// Source positions.
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 4b28d23fe9..9d59b6fd4f 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -8,17 +8,18 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
-#include "src/ic-inl.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/natives.h"
#include "src/objects.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/serialize.h"
#include "src/snapshot.h"
#include "src/snapshot-source-sink.h"
-#include "src/stub-cache.h"
#include "src/v8threads.h"
#include "src/version.h"
@@ -314,7 +315,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
#undef IC_ENTRY
}; // end of ref_table[].
- for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
+ for (size_t i = 0; i < arraysize(ref_table); ++i) {
AddFromId(ref_table[i].type,
ref_table[i].id,
ref_table[i].name,
@@ -340,7 +341,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
}; // end of stats_ref_table[].
Counters* counters = isolate->counters();
- for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
+ for (size_t i = 0; i < arraysize(stats_ref_table); ++i) {
Add(reinterpret_cast<Address>(GetInternalPointer(
(counters->*(stats_ref_table[i].counter))())),
STATS_COUNTER,
@@ -595,8 +596,9 @@ Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
attached_objects_(NULL),
source_(source),
- external_reference_decoder_(NULL) {
- for (int i = 0; i < LAST_SPACE + 1; i++) {
+ external_reference_decoder_(NULL),
+ deserialized_large_objects_(0) {
+ for (int i = 0; i < kNumberOfSpaces; i++) {
reservations_[i] = kUninitializedReservation;
}
}
@@ -614,7 +616,7 @@ void Deserializer::FlushICacheForNewCodeObjects() {
void Deserializer::Deserialize(Isolate* isolate) {
isolate_ = isolate;
DCHECK(isolate_ != NULL);
- isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
+ isolate_->heap()->ReserveSpace(reservations_, high_water_);
// No active threads.
DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
@@ -644,7 +646,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
- ExternalAsciiString::cast(source)->update_data_cache();
+ ExternalOneByteString::cast(source)->update_data_cache();
}
}
@@ -661,7 +663,8 @@ void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
DCHECK(reservations_[i] != kUninitializedReservation);
}
- isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
+ Heap* heap = isolate->heap();
+ heap->ReserveSpace(reservations_, high_water_);
if (external_reference_decoder_ == NULL) {
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}
@@ -727,14 +730,14 @@ class StringTableInsertionKey : public HashTableKey {
return string_->SlowEquals(String::cast(string));
}
- virtual uint32_t Hash() V8_OVERRIDE { return hash_; }
+ virtual uint32_t Hash() OVERRIDE { return hash_; }
- virtual uint32_t HashForObject(Object* key) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* key) OVERRIDE {
return String::cast(key)->Hash();
}
MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate)
- V8_OVERRIDE {
+ OVERRIDE {
return handle(string_, isolate);
}
@@ -797,11 +800,40 @@ void Deserializer::ReadObject(int space_number,
*write_back = obj;
#ifdef DEBUG
- bool is_codespace = (space_number == CODE_SPACE);
- DCHECK(obj->IsCode() == is_codespace);
+ if (obj->IsCode()) {
+ DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
+ } else {
+ DCHECK(space_number != CODE_SPACE);
+ }
#endif
}
+
+// We know the space requirements before deserialization and can
+// pre-allocate that reserved space. During deserialization, all we need
+// to do is to bump up the pointer for each space in the reserved
+// space. This is also used for fixing back references.
+// Since multiple large objects cannot be folded into one large object
+// space allocation, we have to do an actual allocation when deserializing
+// each large object. Instead of tracking offset for back references, we
+// reference large objects by index.
+Address Deserializer::Allocate(int space_index, int size) {
+ if (space_index == LO_SPACE) {
+ AlwaysAllocateScope scope(isolate_);
+ LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
+ Executability exec = static_cast<Executability>(source_->GetInt());
+ AllocationResult result = lo_space->AllocateRaw(size, exec);
+ HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
+ deserialized_large_objects_.Add(obj);
+ return obj->address();
+ } else {
+ DCHECK(space_index < kNumberOfPreallocatedSpaces);
+ Address address = high_water_[space_index];
+ high_water_[space_index] = address + size;
+ return address;
+ }
+}
+
void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
@@ -872,7 +904,7 @@ void Deserializer::ReadChunk(Object** current,
} else if (where == kAttachedReference) { \
DCHECK(deserializing_user_code()); \
int index = source_->GetInt(); \
- new_object = attached_objects_->at(index); \
+ new_object = *attached_objects_->at(index); \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else { \
DCHECK(where == kBackrefWithSkip); \
@@ -924,15 +956,16 @@ void Deserializer::ReadChunk(Object** current,
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 8 fall-through
// cases and one body.
-#define ALL_SPACES(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_STATEMENT(where, how, within, CELL_SPACE) \
- CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
+#define ALL_SPACES(where, how, within) \
+ CASE_STATEMENT(where, how, within, NEW_SPACE) \
+ CASE_BODY(where, how, within, NEW_SPACE) \
+ CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
+ CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ CASE_STATEMENT(where, how, within, CELL_SPACE) \
+ CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
+ CASE_STATEMENT(where, how, within, LO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
@@ -1087,6 +1120,12 @@ void Deserializer::ReadChunk(Object** current,
// current object.
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+ defined(V8_TARGET_ARCH_MIPS64)
+ // Find an object in the roots array and write a pointer to it to in code.
+ CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
+ CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
+#endif
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
@@ -1118,6 +1157,12 @@ void Deserializer::ReadChunk(Object** current,
// Find a builtin and write a pointer to it to the current object.
CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
+#if V8_OOL_CONSTANT_POOL
+ // Find a builtin code entry and write a pointer to it to the current
+ // object.
+ CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
+ CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
+#endif
// Find a builtin and write a pointer to it in the current code object.
CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
@@ -1125,6 +1170,10 @@ void Deserializer::ReadChunk(Object** current,
// the current object.
CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0)
+ CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0)
+ CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
+ CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
+ CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -1167,12 +1216,11 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
sink_(sink),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0),
- code_address_map_(NULL) {
+ code_address_map_(NULL),
+ seen_large_objects_index_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
- for (int i = 0; i <= LAST_SPACE; i++) {
- fullness_[i] = 0;
- }
+ for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
}
@@ -1291,15 +1339,6 @@ int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
if (!root->IsSmi() && root == heap_object) {
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
- defined(V8_TARGET_ARCH_MIPS64)
- if (from == kFromCode) {
- // In order to avoid code bloat in the deserializer we don't have
- // support for the encoding that specifies a particular root should
- // be written from within code.
- return kInvalidRootIndex;
- }
-#endif
return i;
}
}
@@ -1311,15 +1350,12 @@ int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
// location into a later object. We can encode the location as an offset from
// the start of the deserialized objects or as an offset backwards from the
// current allocation pointer.
-void Serializer::SerializeReferenceToPreviousObject(
- int space,
- int address,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- int offset = CurrentAllocationAddress(space) - address;
- // Shift out the bits that are always 0.
- offset >>= kObjectAlignmentBits;
+void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip) {
+ int space = SpaceOfObject(heap_object);
+
if (skip == 0) {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
} else {
@@ -1327,7 +1363,17 @@ void Serializer::SerializeReferenceToPreviousObject(
"BackRefSerWithSkip");
sink_->PutInt(skip, "BackRefSkipDistance");
}
- sink_->PutInt(offset, "offset");
+
+ if (space == LO_SPACE) {
+ int index = address_mapper_.MappedTo(heap_object);
+ sink_->PutInt(index, "large object index");
+ } else {
+ int address = address_mapper_.MappedTo(heap_object);
+ int offset = CurrentAllocationAddress(space) - address;
+ // Shift out the bits that are always 0.
+ offset >>= kObjectAlignmentBits;
+ sink_->PutInt(offset, "offset");
+ }
}
@@ -1347,12 +1393,7 @@ void StartupSerializer::SerializeObject(
}
if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- how_to_code,
- where_to_point,
+ SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
skip);
} else {
if (skip != 0) {
@@ -1455,12 +1496,7 @@ void PartialSerializer::SerializeObject(
DCHECK(!heap_object->IsInternalizedString());
if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space,
- address,
- how_to_code,
- where_to_point,
+ SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
skip);
} else {
if (skip != 0) {
@@ -1496,8 +1532,18 @@ void Serializer::ObjectSerializer::Serialize() {
}
// Mark this object as already serialized.
- int offset = serializer_->Allocate(space, size);
- serializer_->address_mapper()->AddMapping(object_, offset);
+ if (space == LO_SPACE) {
+ if (object_->IsCode()) {
+ sink_->PutInt(EXECUTABLE, "executable large object");
+ } else {
+ sink_->PutInt(NOT_EXECUTABLE, "not executable large object");
+ }
+ int index = serializer_->AllocateLargeObject(size);
+ serializer_->address_mapper()->AddMapping(object_, index);
+ } else {
+ int offset = serializer_->Allocate(space, size);
+ serializer_->address_mapper()->AddMapping(object_, offset);
+ }
// Serialize the map (first word of the object).
serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
@@ -1529,7 +1575,8 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
current_contents == current[-1]) {
DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
int repeat_count = 1;
- while (current < end - 1 && current[repeat_count] == current_contents) {
+ while (&current[repeat_count] < end - 1 &&
+ current[repeat_count] == current_contents) {
repeat_count++;
}
current += repeat_count;
@@ -1626,19 +1673,20 @@ void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
Cell* object = Cell::cast(rinfo->target_cell());
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
+ bytes_processed_so_far_ += kPointerSize;
}
-void Serializer::ObjectSerializer::VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource_pointer) {
+void Serializer::ObjectSerializer::VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource_pointer) {
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source =
serializer_->isolate()->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(source);
- typedef v8::String::ExternalAsciiStringResource Resource;
+ ExternalOneByteString* string = ExternalOneByteString::cast(source);
+ typedef v8::String::ExternalOneByteStringResource Resource;
const Resource* resource = string->resource();
if (resource == *resource_pointer) {
sink_->Put(kNativesStringResource, "NativesStringResource");
@@ -1747,8 +1795,14 @@ int Serializer::SpaceOfObject(HeapObject* object) {
}
+int Serializer::AllocateLargeObject(int size) {
+ fullness_[LO_SPACE] += size;
+ return seen_large_objects_index_++;
+}
+
+
int Serializer::Allocate(int space, int size) {
- CHECK(space >= 0 && space < kNumberOfSpaces);
+ CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
int allocation_address = fullness_[space];
fullness_[space] = allocation_address + size;
return allocation_address;
@@ -1782,64 +1836,109 @@ void Serializer::InitializeCodeAddressMap() {
ScriptData* CodeSerializer::Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
Handle<String> source) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
// Serialize code object.
List<byte> payload;
ListSnapshotSink list_sink(&payload);
- CodeSerializer cs(isolate, &list_sink, *source);
+ DebugSnapshotSink debug_sink(&list_sink);
+ SnapshotByteSink* sink = FLAG_trace_code_serializer
+ ? static_cast<SnapshotByteSink*>(&debug_sink)
+ : static_cast<SnapshotByteSink*>(&list_sink);
+ CodeSerializer cs(isolate, sink, *source, info->code());
DisallowHeapAllocation no_gc;
Object** location = Handle<Object>::cast(info).location();
cs.VisitPointer(location);
cs.Pad();
SerializedCodeData data(&payload, &cs);
- return data.GetScriptData();
+ ScriptData* script_data = data.GetScriptData();
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int length = script_data->length();
+ PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
+ }
+
+ return script_data;
}
void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
- CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
- // The code-caches link to context-specific code objects, which
- // the startup and context serializes cannot currently handle.
- DCHECK(!heap_object->IsMap() ||
- Map::cast(heap_object)->code_cache() ==
- heap_object->GetHeap()->empty_fixed_array());
-
int root_index;
if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
return;
}
- // TODO(yangguo) wire up stubs from stub cache.
- // TODO(yangguo) wire up global object.
- // TODO(yangguo) We cannot deal with different hash seeds yet.
- DCHECK(!heap_object->IsHashTable());
-
if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfObject(heap_object);
- int address = address_mapper_.MappedTo(heap_object);
- SerializeReferenceToPreviousObject(space, address, how_to_code,
- where_to_point, skip);
+ SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
+ skip);
return;
}
+ if (skip != 0) {
+ sink_->Put(kSkip, "SkipFromSerializeObject");
+ sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+ }
+
if (heap_object->IsCode()) {
Code* code_object = Code::cast(heap_object);
- if (code_object->kind() == Code::BUILTIN) {
- SerializeBuiltin(code_object, how_to_code, where_to_point, skip);
- return;
+ switch (code_object->kind()) {
+ case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
+ case Code::HANDLER: // No handlers patched in yet.
+ case Code::REGEXP: // No regexp literals initialized yet.
+ case Code::NUMBER_OF_KINDS: // Pseudo enum value.
+ CHECK(false);
+ case Code::BUILTIN:
+ SerializeBuiltin(code_object, how_to_code, where_to_point);
+ return;
+ case Code::STUB:
+ SerializeCodeStub(code_object, how_to_code, where_to_point);
+ return;
+#define IC_KIND_CASE(KIND) case Code::KIND:
+ IC_KIND_LIST(IC_KIND_CASE)
+#undef IC_KIND_CASE
+ SerializeHeapObject(code_object, how_to_code, where_to_point);
+ return;
+ // TODO(yangguo): add special handling to canonicalize ICs.
+ case Code::FUNCTION:
+ // Only serialize the code for the toplevel function. Replace code
+ // of included function literals by the lazy compile builtin.
+ // This is safe, as checked in Compiler::BuildFunctionInfo.
+ if (code_object != main_code_) {
+ Code* lazy = *isolate()->builtins()->CompileLazy();
+ SerializeBuiltin(lazy, how_to_code, where_to_point);
+ } else {
+ SerializeHeapObject(code_object, how_to_code, where_to_point);
+ }
+ return;
}
- // TODO(yangguo) figure out whether other code kinds can be handled smarter.
}
if (heap_object == source_) {
- SerializeSourceObject(how_to_code, where_to_point, skip);
+ SerializeSourceObject(how_to_code, where_to_point);
return;
}
+ // Past this point we should not see any (context-specific) maps anymore.
+ CHECK(!heap_object->IsMap());
+ // There should be no references to the global object embedded.
+ CHECK(!heap_object->IsJSGlobalProxy() && !heap_object->IsGlobalObject());
+ // There should be no hash table embedded. They would require rehashing.
+ CHECK(!heap_object->IsHashTable());
+
+ SerializeHeapObject(heap_object, how_to_code, where_to_point);
+}
+
+
+void CodeSerializer::SerializeHeapObject(HeapObject* heap_object,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
if (heap_object->IsScript()) {
// The wrapper cache uses a Foreign object to point to a global handle.
// However, the object visitor expects foreign objects to point to external
@@ -1847,10 +1946,12 @@ void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code,
Script::cast(heap_object)->ClearWrapperCache();
}
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeObject");
- sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+ if (FLAG_trace_code_serializer) {
+ PrintF("Encoding heap object: ");
+ heap_object->ShortPrint();
+ PrintF("\n");
}
+
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
where_to_point);
@@ -1859,30 +1960,61 @@ void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code,
void CodeSerializer::SerializeBuiltin(Code* builtin, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeBuiltin");
- sink_->PutInt(skip, "SkipDistanceFromSerializeBuiltin");
- }
-
+ WhereToPoint where_to_point) {
DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+ (how_to_code == kPlain && where_to_point == kInnerPointer) ||
(how_to_code == kFromCode && where_to_point == kInnerPointer));
int builtin_index = builtin->builtin_index();
DCHECK_LT(builtin_index, Builtins::builtin_count);
DCHECK_LE(0, builtin_index);
+
+ if (FLAG_trace_code_serializer) {
+ PrintF("Encoding builtin: %s\n",
+ isolate()->builtins()->name(builtin_index));
+ }
+
sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
sink_->PutInt(builtin_index, "builtin_index");
}
-void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeSourceObject");
- sink_->PutInt(skip, "SkipDistanceFromSerializeSourceObject");
+void CodeSerializer::SerializeCodeStub(Code* stub, HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+ (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+ (how_to_code == kFromCode && where_to_point == kInnerPointer));
+ uint32_t stub_key = stub->stub_key();
+ DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
+
+ int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
+
+ if (FLAG_trace_code_serializer) {
+ PrintF("Encoding code stub %s as %d\n",
+ CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
+ index);
}
+ sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
+ sink_->PutInt(index, "CodeStub key");
+}
+
+
+int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
+ // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
+ int index = 0;
+ while (index < stub_keys_.length()) {
+ if (stub_keys_[index] == stub_key) return index;
+ index++;
+ }
+ stub_keys_.Add(stub_key);
+ return index;
+}
+
+
+void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ if (FLAG_trace_code_serializer) PrintF("Encoding source object\n");
+
DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex");
@@ -1894,22 +2026,36 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
Handle<String> source) {
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
- SerializedCodeData scd(data, *source);
- SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
- Deserializer deserializer(&payload);
- STATIC_ASSERT(NEW_SPACE == 0);
- for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
- deserializer.set_reservation(i, scd.GetReservation(i));
- }
-
- // Prepare and register list of attached objects.
- Vector<Object*> attached_objects = Vector<Object*>::New(1);
- attached_objects[kSourceObjectIndex] = *source;
- deserializer.SetAttachedObjects(&attached_objects);
Object* root;
- deserializer.DeserializePartial(isolate, &root);
- deserializer.FlushICacheForNewCodeObjects();
+
+ {
+ HandleScope scope(isolate);
+
+ SerializedCodeData scd(data, *source);
+ SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
+ Deserializer deserializer(&payload);
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
+ deserializer.set_reservation(i, scd.GetReservation(i));
+ }
+
+ // Prepare and register list of attached objects.
+ Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
+ Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
+ code_stub_keys.length() + kCodeStubsBaseIndex);
+ attached_objects[kSourceObjectIndex] = source;
+ for (int i = 0; i < code_stub_keys.length(); i++) {
+ attached_objects[i + kCodeStubsBaseIndex] =
+ CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
+ }
+ deserializer.SetAttachedObjects(&attached_objects);
+
+ // Deserialize.
+ deserializer.DeserializePartial(isolate, &root);
+ deserializer.FlushICacheForNewCodeObjects();
+ }
+
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int length = data->length();
@@ -1922,18 +2068,35 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
: owns_script_data_(true) {
DisallowHeapAllocation no_gc;
- int data_length = payload->length() + kHeaderEntries * kIntSize;
+ List<uint32_t>* stub_keys = cs->stub_keys();
+
+ // Calculate sizes.
+ int num_stub_keys = stub_keys->length();
+ int stub_keys_size = stub_keys->length() * kInt32Size;
+ int data_length = kHeaderSize + stub_keys_size + payload->length();
+
+ // Allocate backing store and create result data.
byte* data = NewArray<byte>(data_length);
DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
- CopyBytes(data + kHeaderEntries * kIntSize, payload->begin(),
- static_cast<size_t>(payload->length()));
script_data_ = new ScriptData(data, data_length);
script_data_->AcquireDataOwnership();
+
+ // Set header values.
SetHeaderValue(kCheckSumOffset, CheckSum(cs->source()));
+ SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
+ SetHeaderValue(kPayloadLengthOffset, payload->length());
STATIC_ASSERT(NEW_SPACE == 0);
- for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
+ for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
}
+
+ // Copy code stub keys.
+ CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(stub_keys->begin()),
+ stub_keys_size);
+
+ // Copy serialized data.
+ CopyBytes(data + kHeaderSize + stub_keys_size, payload->begin(),
+ static_cast<size_t>(payload->length()));
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index e4e6c3ad86..616f8f19c7 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -148,11 +148,15 @@ class SerializerDeserializer: public ObjectVisitor {
static int nop() { return kNop; }
+ // No reservation for large object space necessary.
+ static const int kNumberOfPreallocatedSpaces = LO_SPACE;
+ static const int kNumberOfSpaces = INVALID_SPACE;
+
protected:
// Where the pointed-to object can be found:
enum Where {
kNewObject = 0, // Object is next in snapshot.
- // 1-6 One per space.
+ // 1-7 One per space.
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
@@ -161,9 +165,9 @@ class SerializerDeserializer: public ObjectVisitor {
kAttachedReference = 0xe, // Object is described in an attached list.
kNop = 0xf, // Does nothing, used to pad.
kBackref = 0x10, // Object is described relative to end.
- // 0x11-0x16 One per space.
+ // 0x11-0x17 One per space.
kBackrefWithSkip = 0x18, // Object is described relative to end.
- // 0x19-0x1e One per space.
+ // 0x19-0x1f One per space.
// 0x20-0x3f Used by misc. tags below.
kPointedToMask = 0x3f
};
@@ -225,11 +229,11 @@ class SerializerDeserializer: public ObjectVisitor {
return byte_code & 0x1f;
}
- static const int kNumberOfSpaces = LO_SPACE;
static const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
static const int kSpaceMask = 7;
+ STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
};
@@ -249,7 +253,7 @@ class Deserializer: public SerializerDeserializer {
void set_reservation(int space_number, int reservation) {
DCHECK(space_number >= 0);
- DCHECK(space_number <= LAST_SPACE);
+ DCHECK(space_number < kNumberOfSpaces);
reservations_[space_number] = reservation;
}
@@ -257,7 +261,7 @@ class Deserializer: public SerializerDeserializer {
// Serialized user code reference certain objects that are provided in a list
// By calling this method, we assume that we are deserializing user code.
- void SetAttachedObjects(Vector<Object*>* attached_objects) {
+ void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) {
attached_objects_ = attached_objects;
}
@@ -282,24 +286,18 @@ class Deserializer: public SerializerDeserializer {
void ReadChunk(
Object** start, Object** end, int space, Address object_address);
void ReadObject(int space_number, Object** write_back);
+ Address Allocate(int space_index, int size);
// Special handling for serialized code like hooking up internalized strings.
HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
Object* ProcessBackRefInSerializedCode(Object* obj);
- // This routine both allocates a new object, and also keeps
- // track of where objects have been allocated so that we can
- // fix back references when deserializing.
- Address Allocate(int space_index, int size) {
- Address address = high_water_[space_index];
- high_water_[space_index] = address + size;
- return address;
- }
-
// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* GetAddressFromEnd(int space) {
int offset = source_->GetInt();
+ if (space == LO_SPACE) return deserialized_large_objects_[offset];
+ DCHECK(space < kNumberOfPreallocatedSpaces);
offset <<= kObjectAlignmentBits;
return HeapObject::FromAddress(high_water_[space] - offset);
}
@@ -308,18 +306,20 @@ class Deserializer: public SerializerDeserializer {
Isolate* isolate_;
// Objects from the attached object descriptions in the serialized user code.
- Vector<Object*>* attached_objects_;
+ Vector<Handle<Object> >* attached_objects_;
SnapshotByteSource* source_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
- Address high_water_[LAST_SPACE + 1];
+ Address high_water_[kNumberOfPreallocatedSpaces];
- int reservations_[LAST_SPACE + 1];
+ int reservations_[kNumberOfSpaces];
static const intptr_t kUninitializedReservation = -1;
ExternalReferenceDecoder* external_reference_decoder_;
+ List<HeapObject*> deserialized_large_objects_;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
@@ -430,8 +430,8 @@ class Serializer : public SerializerDeserializer {
void VisitCell(RelocInfo* rinfo);
void VisitRuntimeEntry(RelocInfo* reloc);
// Used for seralizing the external strings that hold the natives source.
- void VisitExternalAsciiString(
- v8::String::ExternalAsciiStringResource** resource);
+ void VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource);
// We can't serialize a heap with external two byte strings.
void VisitExternalTwoByteString(
v8::String::ExternalStringResource** resource) {
@@ -459,15 +459,14 @@ class Serializer : public SerializerDeserializer {
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) = 0;
- void SerializeReferenceToPreviousObject(
- int space,
- int address,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip);
+ void SerializeReferenceToPreviousObject(HeapObject* heap_object,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip);
void InitializeAllocators();
// This will return the space for an object.
static int SpaceOfObject(HeapObject* object);
+ int AllocateLargeObject(int size);
int Allocate(int space, int size);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
@@ -482,7 +481,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references.
- int fullness_[LAST_SPACE + 1];
+ int fullness_[kNumberOfSpaces];
SnapshotByteSink* sink_;
ExternalReferenceEncoder* external_reference_encoder_;
@@ -499,6 +498,8 @@ class Serializer : public SerializerDeserializer {
private:
CodeAddressMap* code_address_map_;
+ // We map serialized large objects to indexes for back-referencing.
+ int seen_large_objects_index_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
@@ -576,38 +577,49 @@ class StartupSerializer : public Serializer {
class CodeSerializer : public Serializer {
public:
- CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
- : Serializer(isolate, sink), source_(source) {
- set_root_index_wave_front(Heap::kStrongRootListLength);
- InitializeCodeAddressMap();
- }
-
static ScriptData* Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
Handle<String> source);
- virtual void SerializeObject(Object* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
-
static Handle<SharedFunctionInfo> Deserialize(Isolate* isolate,
ScriptData* data,
Handle<String> source);
static const int kSourceObjectIndex = 0;
+ static const int kCodeStubsBaseIndex = 1;
String* source() {
DCHECK(!AllowHeapAllocation::IsAllowed());
return source_;
}
+ List<uint32_t>* stub_keys() { return &stub_keys_; }
+
private:
+ CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
+ Code* main_code)
+ : Serializer(isolate, sink), source_(source), main_code_(main_code) {
+ set_root_index_wave_front(Heap::kStrongRootListLength);
+ InitializeCodeAddressMap();
+ }
+
+ virtual void SerializeObject(Object* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
void SerializeBuiltin(Code* builtin, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
- void SerializeSourceObject(HowToCode how_to_code, WhereToPoint where_to_point,
- int skip);
+ WhereToPoint where_to_point);
+ void SerializeCodeStub(Code* stub, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ void SerializeSourceObject(HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ void SerializeHeapObject(HeapObject* heap_object, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ int AddCodeStubKey(uint32_t stub_key);
DisallowHeapAllocation no_gc_;
String* source_;
+ Code* main_code_;
+ List<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -638,12 +650,22 @@ class SerializedCodeData {
return result;
}
+ Vector<const uint32_t> CodeStubKeys() const {
+ return Vector<const uint32_t>(
+ reinterpret_cast<const uint32_t*>(script_data_->data() + kHeaderSize),
+ GetHeaderValue(kNumCodeStubKeysOffset));
+ }
+
const byte* Payload() const {
- return script_data_->data() + kHeaderEntries * kIntSize;
+ int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
+ return script_data_->data() + kHeaderSize + code_stubs_size;
}
int PayloadLength() const {
- return script_data_->length() - kHeaderEntries * kIntSize;
+ int payload_length = GetHeaderValue(kPayloadLengthOffset);
+ DCHECK_EQ(script_data_->data() + script_data_->length(),
+ Payload() + payload_length);
+ return payload_length;
}
int GetReservation(int space) const {
@@ -666,10 +688,21 @@ class SerializedCodeData {
// The data header consists of int-sized entries:
// [0] version hash
- // [1..7] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
+ // [1] number of code stub keys
+ // [2] payload length
+ // [3..9] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
static const int kCheckSumOffset = 0;
- static const int kReservationsOffset = 1;
- static const int kHeaderEntries = 8;
+ static const int kNumCodeStubKeysOffset = 1;
+ static const int kPayloadLengthOffset = 2;
+ static const int kReservationsOffset = 3;
+
+ static const int kHeaderEntries =
+ kReservationsOffset + SerializerDeserializer::kNumberOfSpaces;
+ static const int kHeaderSize = kHeaderEntries * kIntSize;
+
+ // Following the header, we store, in sequential order
+ // - code stub keys
+ // - serialization payload
ScriptData* script_data_;
bool owns_script_data_;
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index 25193b2ed0..4e90ce14e7 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -21,12 +21,12 @@ void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
- deserializer->set_reservation(PROPERTY_CELL_SPACE,
- property_cell_space_used_);
+ deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
+ deserializer->set_reservation(LO_SPACE, lo_space_used_);
}
-bool Snapshot::Initialize() {
+bool Snapshot::Initialize(Isolate* isolate) {
if (size_ > 0) {
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) {
@@ -35,7 +35,7 @@ bool Snapshot::Initialize() {
SnapshotByteSource source(raw_data_, raw_size_);
Deserializer deserializer(&source);
ReserveSpaceForLinkedInSnapshot(&deserializer);
- bool success = V8::Initialize(&deserializer);
+ bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
@@ -67,6 +67,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
context_property_cell_space_used_);
+ deserializer.set_reservation(LO_SPACE, context_lo_space_used_);
deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot-empty.cc
index 65207bfc74..e8673e5600 100644
--- a/deps/v8/src/snapshot-empty.cc
+++ b/deps/v8/src/snapshot-empty.cc
@@ -27,6 +27,7 @@ const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
const int Snapshot::property_cell_space_used_ = 0;
+const int Snapshot::lo_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
@@ -35,5 +36,5 @@ const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
const int Snapshot::context_property_cell_space_used_ = 0;
-
+const int Snapshot::context_lo_space_used_ = 0;
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot-external.cc b/deps/v8/src/snapshot-external.cc
index 38b7cf414c..9b8bc1b917 100644
--- a/deps/v8/src/snapshot-external.cc
+++ b/deps/v8/src/snapshot-external.cc
@@ -25,6 +25,7 @@ struct SnapshotImpl {
int map_space_used;
int cell_space_used;
int property_cell_space_used;
+ int lo_space_used;
const byte* context_data;
int context_size;
@@ -35,6 +36,7 @@ struct SnapshotImpl {
int context_map_space_used;
int context_cell_space_used;
int context_property_cell_space_used;
+ int context_lo_space_used;
};
@@ -46,7 +48,7 @@ bool Snapshot::HaveASnapshotToStartFrom() {
}
-bool Snapshot::Initialize() {
+bool Snapshot::Initialize(Isolate* isolate) {
if (!HaveASnapshotToStartFrom())
return false;
@@ -66,7 +68,8 @@ bool Snapshot::Initialize() {
deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
snapshot_impl_->property_cell_space_used);
- bool success = V8::Initialize(&deserializer);
+ deserializer.set_reservation(LO_SPACE, snapshot_impl_->lo_space_used);
+ bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
@@ -97,6 +100,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
deserializer.set_reservation(PROPERTY_CELL_SPACE,
snapshot_impl_->
context_property_cell_space_used);
+ deserializer.set_reservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
Object* root;
deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
@@ -123,6 +127,7 @@ void SetSnapshotFromFile(StartupData* snapshot_blob) {
snapshot_impl_->map_space_used = source.GetInt();
snapshot_impl_->cell_space_used = source.GetInt();
snapshot_impl_->property_cell_space_used = source.GetInt();
+ snapshot_impl_->lo_space_used = source.GetInt();
success &= source.GetBlob(&snapshot_impl_->context_data,
&snapshot_impl_->context_size);
diff --git a/deps/v8/src/snapshot-source-sink.cc b/deps/v8/src/snapshot-source-sink.cc
index 2be14383fa..29bad33aac 100644
--- a/deps/v8/src/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot-source-sink.cc
@@ -24,14 +24,10 @@ SnapshotByteSource::~SnapshotByteSource() { }
int32_t SnapshotByteSource::GetUnalignedInt() {
DCHECK(position_ < length_); // Require at least one byte left.
-#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN
- int32_t answer = *reinterpret_cast<const int32_t*>(data_ + position_);
-#else
int32_t answer = data_[position_];
answer |= data_[position_ + 1] << 8;
answer |= data_[position_ + 2] << 16;
answer |= data_[position_ + 3] << 24;
-#endif
return answer;
}
@@ -43,15 +39,17 @@ void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
- DCHECK(integer < 1 << 22);
+ DCHECK(integer < 1 << 30);
integer <<= 2;
int bytes = 1;
if (integer > 0xff) bytes = 2;
if (integer > 0xffff) bytes = 3;
- integer |= bytes;
+ if (integer > 0xffffff) bytes = 4;
+ integer |= (bytes - 1);
Put(static_cast<int>(integer & 0xff), "IntPart1");
if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
+ if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xff), "IntPart4");
}
void SnapshotByteSink::PutRaw(byte* data, int number_of_bytes,
diff --git a/deps/v8/src/snapshot-source-sink.h b/deps/v8/src/snapshot-source-sink.h
index bda6455e8b..c1a31b5645 100644
--- a/deps/v8/src/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot-source-sink.h
@@ -17,7 +17,7 @@ namespace internal {
*
* Note: Memory ownership remains with callee.
*/
-class SnapshotByteSource V8_FINAL {
+class SnapshotByteSource FINAL {
public:
SnapshotByteSource(const byte* array, int length);
~SnapshotByteSource();
@@ -39,7 +39,7 @@ class SnapshotByteSource V8_FINAL {
// This way of variable-length encoding integers does not suffer from branch
// mispredictions.
uint32_t answer = GetUnalignedInt();
- int bytes = answer & 3;
+ int bytes = (answer & 3) + 1;
Advance(bytes);
uint32_t mask = 0xffffffffu;
mask >>= 32 - (bytes << 3);
@@ -99,8 +99,8 @@ class DummySnapshotSink : public SnapshotByteSink {
class DebugSnapshotSink : public SnapshotByteSink {
public:
explicit DebugSnapshotSink(SnapshotByteSink* chained) : sink_(chained) {}
- virtual void Put(byte b, const char* description) V8_OVERRIDE;
- virtual int Position() V8_OVERRIDE { return sink_->Position(); }
+ virtual void Put(byte b, const char* description) OVERRIDE;
+ virtual int Position() OVERRIDE { return sink_->Position(); }
private:
SnapshotByteSink* sink_;
@@ -110,10 +110,10 @@ class DebugSnapshotSink : public SnapshotByteSink {
class ListSnapshotSink : public i::SnapshotByteSink {
public:
explicit ListSnapshotSink(i::List<byte>* data) : data_(data) {}
- virtual void Put(byte b, const char* description) V8_OVERRIDE {
+ virtual void Put(byte b, const char* description) OVERRIDE {
data_->Add(b);
}
- virtual int Position() V8_OVERRIDE { return data_->length(); }
+ virtual int Position() OVERRIDE { return data_->length(); }
private:
i::List<byte>* data_;
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index b785cf5700..590ecf1718 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -12,9 +12,9 @@ namespace internal {
class Snapshot {
public:
- // Initialize the VM from the internal snapshot. Returns false if no snapshot
- // could be found.
- static bool Initialize();
+ // Initialize the Isolate from the internal snapshot. Returns false if no
+ // snapshot could be found.
+ static bool Initialize(Isolate* isolate);
static bool HaveASnapshotToStartFrom();
@@ -48,6 +48,7 @@ class Snapshot {
static const int map_space_used_;
static const int cell_space_used_;
static const int property_cell_space_used_;
+ static const int lo_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
@@ -55,6 +56,7 @@ class Snapshot {
static const int context_map_space_used_;
static const int context_cell_space_used_;
static const int context_property_cell_space_used_;
+ static const int context_lo_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
diff --git a/deps/v8/src/string-iterator.js b/deps/v8/src/string-iterator.js
index 7222885a56..cb578e77e7 100644
--- a/deps/v8/src/string-iterator.js
+++ b/deps/v8/src/string-iterator.js
@@ -38,7 +38,7 @@ function StringIteratorIterator() {
function StringIteratorNext() {
var iterator = ToObject(this);
- if (!HAS_PRIVATE(iterator, stringIteratorIteratedStringSymbol)) {
+ if (!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
throw MakeTypeError('incompatible_method_receiver',
['String Iterator.prototype.next']);
}
@@ -52,7 +52,8 @@ function StringIteratorNext() {
var length = TO_UINT32(s.length);
if (position >= length) {
- SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, UNDEFINED);
+ SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol,
+ UNDEFINED);
return CreateIteratorResultObject(UNDEFINED, true);
}
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index ef47db6241..bf5ffe6b2d 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -30,7 +30,7 @@ class StringSearchBase {
// a potentially less efficient searching, but is a safe approximation.
// For needles using only characters in the same Unicode 256-code point page,
// there is no search speed degradation.
- static const int kAsciiAlphabetSize = 256;
+ static const int kLatin1AlphabetSize = 256;
static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
// Bad-char shift table stored in the state. It's length is the alphabet size.
@@ -81,8 +81,8 @@ class StringSearch : private StringSearchBase {
static inline int AlphabetSize() {
if (sizeof(PatternChar) == 1) {
- // ASCII needle.
- return kAsciiAlphabetSize;
+ // Latin1 needle.
+ return kLatin1AlphabetSize;
} else {
DCHECK(sizeof(PatternChar) == 2);
// UC16 needle.
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index e2475ff3b3..fca1d4beab 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -24,18 +24,18 @@ class StringAllocator {
// Normal allocator uses new[] and delete[].
-class HeapStringAllocator V8_FINAL : public StringAllocator {
+class HeapStringAllocator FINAL : public StringAllocator {
public:
~HeapStringAllocator() { DeleteArray(space_); }
- virtual char* allocate(unsigned bytes) V8_OVERRIDE;
- virtual char* grow(unsigned* bytes) V8_OVERRIDE;
+ virtual char* allocate(unsigned bytes) OVERRIDE;
+ virtual char* grow(unsigned* bytes) OVERRIDE;
private:
char* space_;
};
-class FmtElm V8_FINAL {
+class FmtElm FINAL {
public:
FmtElm(int value) : type_(INT) { // NOLINT
data_.u_int_ = value;
@@ -75,7 +75,7 @@ class FmtElm V8_FINAL {
};
-class StringStream V8_FINAL {
+class StringStream FINAL {
public:
explicit StringStream(StringAllocator* allocator):
allocator_(allocator),
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index ae65264d4a..ac5cb7f99e 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -9,11 +9,12 @@
// -------------------------------------------------------------------
function StringConstructor(x) {
- var value = %_ArgumentsLength() == 0 ? '' : TO_STRING_INLINE(x);
+ if (%_ArgumentsLength() == 0) x = '';
if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
+ %_SetValueOf(this, TO_STRING_INLINE(x));
} else {
- return value;
+ return IS_SYMBOL(x) ?
+ %_CallFunction(x, SymbolToString) : TO_STRING_INLINE(x);
}
}
@@ -812,7 +813,7 @@ function StringFromCharCode(code) {
if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
if (code < 0) code = code & 0xffff;
if (code > 0xff) break;
- %_OneByteSeqStringSetChar(one_byte, i, code);
+ %_OneByteSeqStringSetChar(i, code, one_byte);
}
if (i == n) return one_byte;
one_byte = %TruncateString(one_byte, i);
@@ -821,7 +822,7 @@ function StringFromCharCode(code) {
for (var j = 0; i < n; i++, j++) {
var code = %_Arguments(i);
if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- %_TwoByteSeqStringSetChar(two_byte, j, code);
+ %_TwoByteSeqStringSetChar(j, code, two_byte);
}
return one_byte + two_byte;
}
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index 64b7a29ef0..2b48af3cc6 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -63,7 +63,7 @@ static const double exact_powers_of_ten[] = {
// 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
10000000000000000000000.0
};
-static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
+static const int kExactPowersOfTenSize = arraysize(exact_powers_of_ten);
// Maximum number of significant digits in the decimal representation.
// In fact the value is 772 (see conversions.cc), but to give us some margin
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
deleted file mode 100644
index f959e92498..0000000000
--- a/deps/v8/src/stub-cache.cc
+++ /dev/null
@@ -1,1293 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/api.h"
-#include "src/arguments.h"
-#include "src/ast.h"
-#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
-#include "src/gdb-jit.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-#include "src/type-info.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------
-// StubCache implementation.
-
-
-StubCache::StubCache(Isolate* isolate)
- : isolate_(isolate) { }
-
-
-void StubCache::Initialize() {
- DCHECK(IsPowerOf2(kPrimaryTableSize));
- DCHECK(IsPowerOf2(kSecondaryTableSize));
- Clear();
-}
-
-
-static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
- Code::Flags flags) {
- flags = Code::RemoveTypeAndHolderFromFlags(flags);
-
- // Validate that the name does not move on scavenge, and that we
- // can use identity checks instead of structural equality checks.
- DCHECK(!name->GetHeap()->InNewSpace(name));
- DCHECK(name->IsUniqueName());
-
- // The state bits are not important to the hash function because the stub
- // cache only contains handlers. Make sure that the bits are the least
- // significant so they will be the ones masked out.
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
- STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
-
- // Make sure that the code type and cache holder are not included in the hash.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
- DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
-
- return flags;
-}
-
-
-Code* StubCache::Set(Name* name, Map* map, Code* code) {
- Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
-
- // Compute the primary entry.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary = entry(primary_, primary_offset);
- Code* old_code = primary->value;
-
- // If the primary entry has useful data in it, we retire it to the
- // secondary cache before overwriting it.
- if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
- Map* old_map = primary->map;
- Code::Flags old_flags =
- Code::RemoveTypeAndHolderFromFlags(old_code->flags());
- int seed = PrimaryOffset(primary->key, old_flags, old_map);
- int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
- Entry* secondary = entry(secondary_, secondary_offset);
- *secondary = *primary;
- }
-
- // Update primary cache.
- primary->key = name;
- primary->value = code;
- primary->map = map;
- isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
- return code;
-}
-
-
-Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
- flags = CommonStubCacheChecks(name, map, flags);
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary = entry(primary_, primary_offset);
- if (primary->key == name && primary->map == map) {
- return primary->value;
- }
- int secondary_offset = SecondaryOffset(name, flags, primary_offset);
- Entry* secondary = entry(secondary_, secondary_offset);
- if (secondary->key == name && secondary->map == map) {
- return secondary->value;
- }
- return NULL;
-}
-
-
-Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
- Handle<Map> stub_holder, Code::Kind kind,
- ExtraICState extra_state,
- CacheHolderFlag cache_holder) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, cache_holder);
- Object* probe = stub_holder->FindInCodeCache(*name, flags);
- if (probe->IsCode()) return handle(Code::cast(probe));
- return Handle<Code>::null();
-}
-
-
-Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
- Handle<Map> stub_holder,
- Code::Kind kind,
- CacheHolderFlag cache_holder,
- Code::StubType type) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
- Object* probe = stub_holder->FindInCodeCache(*name, flags);
- if (probe->IsCode()) return handle(Code::cast(probe));
- return Handle<Code>::null();
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeMonomorphic(
- Code::Kind kind, Handle<Name> name, Handle<HeapType> type,
- Handle<Code> handler, ExtraICState extra_ic_state) {
- Isolate* isolate = name->GetIsolate();
- if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
- handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
- name = isolate->factory()->normal_ic_symbol();
- }
-
- CacheHolderFlag flag;
- Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag);
-
- Handle<Code> ic;
- // There are multiple string maps that all use the same prototype. That
- // prototype cannot hold multiple handlers, one for each of the string maps,
- // for a single name. Hence, turn off caching of the IC.
- bool can_be_cached = !type->Is(HeapType::String());
- if (can_be_cached) {
- ic = Find(name, stub_holder, kind, extra_ic_state, flag);
- if (!ic.is_null()) return ic;
- }
-
-#ifdef DEBUG
- if (kind == Code::KEYED_STORE_IC) {
- DCHECK(STANDARD_STORE ==
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
- }
-#endif
-
- PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
- ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY);
-
- if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
- return ic;
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
- Handle<Name> name, Handle<HeapType> type) {
- Isolate* isolate = name->GetIsolate();
- Handle<Map> receiver_map = IC::TypeToMap(*type, isolate);
- if (receiver_map->prototype()->IsNull()) {
- // TODO(jkummerow/verwaest): If there is no prototype and the property
- // is nonexistent, introduce a builtin to handle this (fast properties
- // -> return undefined, dictionary properties -> do negative lookup).
- return Handle<Code>();
- }
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map =
- IC::GetHandlerCacheHolder(*type, false, isolate, &flag);
-
- // If no dictionary mode objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map and we use
- // the empty string for the map cache in that case. If there are dictionary
- // mode objects involved, we need to do negative lookups in the stub and
- // therefore the stub will be specific to the name.
- Handle<Name> cache_name =
- receiver_map->is_dictionary_map()
- ? name
- : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
- Handle<Map> current_map = stub_holder_map;
- Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
- while (true) {
- if (current_map->is_dictionary_map()) cache_name = name;
- if (current_map->prototype()->IsNull()) break;
- last = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(last->map());
- }
- // Compile the stub that is either shared for all names or
- // name specific if there are global objects involved.
- Handle<Code> handler = PropertyHandlerCompiler::Find(
- cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
- if (!handler.is_null()) return handler;
-
- NamedLoadHandlerCompiler compiler(isolate, type, last, flag);
- handler = compiler.CompileLoadNonexistent(cache_name);
- Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
- return handler;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
- Handle<Map> receiver_map) {
- Isolate* isolate = receiver_map->GetIsolate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
- Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
-
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub;
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- stub = LoadFastElementStub(isolate,
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode();
- } else {
- stub = FLAG_compiled_keyed_dictionary_loads
- ? LoadDictionaryElementStub(isolate).GetCode()
- : LoadDictionaryElementPlatformStub(isolate).GetCode();
- }
- PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code =
- compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
- isolate->factory()->empty_string(), ELEMENT);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, StrictMode strict_mode,
- KeyedAccessStoreMode store_mode) {
- Isolate* isolate = receiver_map->GetIsolate();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
-
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
- Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state())
- == store_mode);
- return code;
-}
-
-
-#define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type)
-
-static void FillCache(Isolate* isolate, Handle<Code> code) {
- Handle<UnseededNumberDictionary> dictionary =
- UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
- code->flags(),
- code);
- isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
-}
-
-
-Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
- ExtraICState state) {
- Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
- UnseededNumberDictionary* dictionary =
- isolate->heap()->non_monomorphic_cache();
- int entry = dictionary->FindEntry(isolate, flags);
- DCHECK(entry != -1);
- Object* code = dictionary->ValueAt(entry);
- // This might be called during the marking phase of the collector
- // hence the unchecked cast.
- return reinterpret_cast<Code*>(code);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
- InlineCacheState ic_state,
- ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
- Handle<UnseededNumberDictionary> cache =
- isolate->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- PropertyICCompiler compiler(isolate, Code::LOAD_IC);
- Handle<Code> code;
- if (ic_state == UNINITIALIZED) {
- code = compiler.CompileLoadInitialize(flags);
- } else if (ic_state == PREMONOMORPHIC) {
- code = compiler.CompileLoadPreMonomorphic(flags);
- } else if (ic_state == MEGAMORPHIC) {
- code = compiler.CompileLoadMegamorphic(flags);
- } else {
- UNREACHABLE();
- }
- FillCache(isolate, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
- InlineCacheState ic_state,
- ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
- Handle<UnseededNumberDictionary> cache =
- isolate->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- PropertyICCompiler compiler(isolate, Code::STORE_IC);
- Handle<Code> code;
- if (ic_state == UNINITIALIZED) {
- code = compiler.CompileStoreInitialize(flags);
- } else if (ic_state == PREMONOMORPHIC) {
- code = compiler.CompileStorePreMonomorphic(flags);
- } else if (ic_state == GENERIC) {
- code = compiler.CompileStoreGeneric(flags);
- } else if (ic_state == MEGAMORPHIC) {
- code = compiler.CompileStoreMegamorphic(flags);
- } else {
- UNREACHABLE();
- }
-
- FillCache(isolate, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub* stub) {
- Isolate* isolate = receiver_map->GetIsolate();
- Handle<String> name(isolate->heap()->empty_string());
- if (!receiver_map->is_dictionary_map()) {
- Handle<Code> cached_ic =
- Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
- if (!cached_ic.is_null()) return cached_ic;
- }
-
- Code::FindAndReplacePattern pattern;
- pattern.Add(isolate->factory()->meta_map(), receiver_map);
- Handle<Code> ic = stub->GetCodeCopy(pattern);
-
- if (!receiver_map->is_dictionary_map()) {
- Map::UpdateCodeCache(receiver_map, name, ic);
- }
-
- return ic;
-}
-
-
-// TODO(verwaest): Change this method so it takes in a TypeHandleList.
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
- MapHandleList* receiver_maps) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- TypeHandleList types(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); i++) {
- types.Add(HeapType::Class(receiver_maps->at(i), isolate));
- }
- CodeHandleList handlers(receiver_maps->length());
- ElementHandlerCompiler compiler(isolate);
- compiler.CompileElementHandlers(receiver_maps, &handlers);
- PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code = ic_compiler.CompilePolymorphic(
- &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL,
- ELEMENT);
-
- isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
-
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputePolymorphic(
- Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers,
- int valid_types, Handle<Name> name, ExtraICState extra_ic_state) {
- Handle<Code> handler = handlers->at(0);
- Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL;
- DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
- PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
- return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- StrictMode strict_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState(
- strict_mode, store_mode);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
-void StubCache::Clear() {
- Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
- for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = isolate()->heap()->empty_string();
- primary_[i].map = NULL;
- primary_[i].value = empty;
- }
- for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = isolate()->heap()->empty_string();
- secondary_[j].map = NULL;
- secondary_[j].value = empty;
- }
-}
-
-
-void StubCache::CollectMatchingMaps(SmallMapList* types,
- Handle<Name> name,
- Code::Flags flags,
- Handle<Context> native_context,
- Zone* zone) {
- for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == *name) {
- Map* map = primary_[i].map;
- // Map can be NULL, if the stub is constant function call
- // with a primitive receiver.
- if (map == NULL) continue;
-
- int offset = PrimaryOffset(*name, flags, map);
- if (entry(primary_, offset) == &primary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->AddMapIfMissing(Handle<Map>(map), zone);
- }
- }
- }
-
- for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == *name) {
- Map* map = secondary_[i].map;
- // Map can be NULL, if the stub is constant function call
- // with a primitive receiver.
- if (map == NULL) continue;
-
- // Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(*name, flags, map);
-
- // Lookup in secondary table and add matches.
- int offset = SecondaryOffset(*name, flags, primary_offset);
- if (entry(secondary_, offset) == &secondary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->AddMapIfMissing(Handle<Map>(map), zone);
- }
- }
- }
-}
-
-
-// ------------------------------------------------------------------------
-// StubCompiler implementation.
-
-
-RUNTIME_FUNCTION(StoreCallbackProperty) {
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
- Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2);
- Handle<Name> name = args.at<Name>(3);
- Handle<Object> value = args.at<Object>(4);
- HandleScope scope(isolate);
-
- DCHECK(callback->IsCompatibleReceiver(*receiver));
-
- Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorSetterCallback fun =
- FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
- DCHECK(fun != NULL);
-
- // TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return *value;
- Handle<String> str = Handle<String>::cast(name);
-
- LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
- PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
- *holder);
- custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return *value;
-}
-
-
-/**
- * Attempts to load a property with an interceptor (which must be present),
- * but doesn't search the prototype chain.
- *
- * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
- * provide any value for the given name.
- */
-RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
- DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
- Handle<Name> name_handle =
- args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
- NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
-
- // TODO(rossberg): Support symbols in the API.
- if (name_handle->IsSymbol())
- return isolate->heap()->no_interceptor_result_sentinel();
- Handle<String> name = Handle<String>::cast(name_handle);
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetterCallback getter =
- FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
- DCHECK(getter != NULL);
-
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
- Handle<JSObject> holder =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
- PropertyCallbackArguments callback_args(
- isolate, interceptor_info->data(), *receiver, *holder);
- {
- // Use the interceptor getter.
- HandleScope scope(isolate);
- v8::Handle<v8::Value> r =
- callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- Handle<Object> result = v8::Utils::OpenHandle(*r);
- result->VerifyApiCallResultType();
- return *v8::Utils::OpenHandle(*r);
- }
- }
-
- return isolate->heap()->no_interceptor_result_sentinel();
-}
-
-
-static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
- // If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here.
- HandleScope scope(isolate);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- if (ic.contextual_mode() != CONTEXTUAL) {
- return isolate->heap()->undefined_value();
- }
-
- // Throw a reference error.
- Handle<Name> name_handle(name);
- Handle<Object> error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
- return isolate->Throw(*error);
-}
-
-
-/**
- * Loads a property with an interceptor performing post interceptor
- * lookup if interceptor failed.
- */
-RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
- Handle<Name> name =
- args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
- Handle<JSObject> holder =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
-
- Handle<Object> result;
- LookupIterator it(receiver, name, holder);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::GetProperty(&it));
-
- if (it.IsFound()) return *result;
-
- return ThrowReferenceError(isolate, Name::cast(args[0]));
-}
-
-
-RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<Name> name = args.at<Name>(1);
- Handle<Object> value = args.at<Object>(2);
-#ifdef DEBUG
- if (receiver->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, receiver);
- DCHECK(iter.IsAtEnd() ||
- Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter))
- ->HasNamedInterceptor());
- } else {
- DCHECK(receiver->HasNamedInterceptor());
- }
-#endif
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetProperty(receiver, name, value, ic.strict_mode()));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(LoadElementWithInterceptor) {
- HandleScope scope(isolate);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- DCHECK(args.smi_at(1) >= 0);
- uint32_t index = args.smi_at(1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::GetElementWithInterceptor(receiver, receiver, index));
- return *result;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
- LoadIC::GenerateInitialize(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
- LoadIC::GeneratePreMonomorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) {
- LoadIC::GenerateMegamorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
- StoreIC::GenerateInitialize(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
- StoreIC::GeneratePreMonomorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
- StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
- StoreIC::GenerateMegamorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-#undef CALL_LOGGER_TAG
-
-
-Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
- const char* name) {
- // Create code object in the heap.
- CodeDesc desc;
- masm()->GetCode(&desc);
- Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
- if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
- OFStream os(stdout);
- code->Disassemble(name, os);
- }
-#endif
- return code;
-}
-
-
-Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
- Handle<Name> name) {
- return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
- ? GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get())
- : GetCodeWithFlags(flags, NULL);
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-
-Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
- Handle<Name> name,
- Label* miss) {
- PrototypeCheckType check_type = CHECK_ALL_MAPS;
- int function_index = -1;
- if (type()->Is(HeapType::String())) {
- function_index = Context::STRING_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Symbol())) {
- function_index = Context::SYMBOL_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Number())) {
- function_index = Context::NUMBER_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Boolean())) {
- function_index = Context::BOOLEAN_FUNCTION_INDEX;
- } else {
- check_type = SKIP_RECEIVER;
- }
-
- if (check_type == CHECK_ALL_MAPS) {
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), function_index, scratch1(), miss);
- Object* function = isolate()->native_context()->get(function_index);
- Object* prototype = JSFunction::cast(function)->instance_prototype();
- set_type_for_object(handle(prototype, isolate()));
- object_reg = scratch1();
- }
-
- // Check that the maps starting from the prototype haven't changed.
- return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
- miss, check_type);
-}
-
-
-// Frontend for store uses the name register. It has to be restored before a
-// miss.
-Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
- Handle<Name> name,
- Label* miss) {
- return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
- miss, SKIP_RECEIVER);
-}
-
-
-bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) {
- for (int i = 0; i < types->length(); ++i) {
- if (types->at(i)->Is(HeapType::Number())) return true;
- }
- return false;
-}
-
-
-Register PropertyHandlerCompiler::Frontend(Register object_reg,
- Handle<Name> name) {
- Label miss;
- Register reg = FrontendHeader(object_reg, name, &miss);
- FrontendFooter(name, &miss);
- return reg;
-}
-
-
-void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
- Label* miss,
- Register scratch1,
- Register scratch2) {
- Register holder_reg;
- Handle<Map> last_map;
- if (holder().is_null()) {
- holder_reg = receiver();
- last_map = IC::TypeToMap(*type(), isolate());
- // If |type| has null as its prototype, |holder()| is
- // Handle<JSObject>::null().
- DCHECK(last_map->prototype() == isolate()->heap()->null_value());
- } else {
- holder_reg = FrontendHeader(receiver(), name, miss);
- last_map = handle(holder()->map());
- }
-
- if (last_map->is_dictionary_map()) {
- if (last_map->IsJSGlobalObjectMap()) {
- Handle<JSGlobalObject> global =
- holder().is_null()
- ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value())
- : Handle<JSGlobalObject>::cast(holder());
- GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
- } else {
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(holder().is_null() ||
- holder()->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
- scratch2);
- }
- }
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
- FieldIndex field) {
- Register reg = Frontend(receiver(), name);
- __ Move(receiver(), reg);
- LoadFieldStub stub(isolate(), field);
- GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
- int constant_index) {
- Register reg = Frontend(receiver(), name);
- __ Move(receiver(), reg);
- LoadConstantStub stub(isolate(), constant_index);
- GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
- Handle<Name> name) {
- Label miss;
- NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
- GenerateLoadConstant(isolate()->factory()->undefined_value());
- FrontendFooter(name, &miss);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
- Register reg = Frontend(receiver(), name);
- GenerateLoadCallback(reg, callback);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, const CallOptimization& call_optimization) {
- DCHECK(call_optimization.is_simple_api_call());
- Frontend(receiver(), name);
- Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), false, 0, NULL);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
- Handle<Name> name) {
- // Perform a lookup after the interceptor.
- LookupResult lookup(isolate());
- holder()->LookupOwnRealNamedProperty(name, &lookup);
- if (!lookup.IsFound()) {
- PrototypeIterator iter(holder()->GetIsolate(), holder());
- if (!iter.IsAtEnd()) {
- PrototypeIterator::GetCurrent(iter)->Lookup(name, &lookup);
- }
- }
-
- Register reg = Frontend(receiver(), name);
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(reg, &lookup, name);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
- Register interceptor_reg, Handle<Name> name, LookupResult* lookup) {
- Handle<JSObject> real_named_property_holder(lookup->holder());
-
- set_type_for_object(holder());
- set_holder(real_named_property_holder);
- Register reg = Frontend(interceptor_reg, name);
-
- if (lookup->IsField()) {
- __ Move(receiver(), reg);
- LoadFieldStub stub(isolate(), lookup->GetFieldIndex());
- GenerateTailCall(masm(), stub.GetCode());
- } else {
- DCHECK(lookup->type() == CALLBACKS);
- Handle<ExecutableAccessorInfo> callback(
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
- DCHECK(callback->getter() != NULL);
- GenerateLoadCallback(reg, callback);
- }
-}
-
-
-Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type,
- Handle<Code> handler,
- Handle<Name> name,
- IcCheckType check) {
- TypeHandleList types(1);
- CodeHandleList handlers(1);
- types.Add(type);
- handlers.Add(handler);
- Code::StubType stub_type = handler->type();
- return CompilePolymorphic(&types, &handlers, name, stub_type, check);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
- Handle<Name> name, Handle<JSFunction> getter) {
- Frontend(receiver(), name);
- GenerateLoadViaGetter(masm(), type(), receiver(), getter);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(verwaest): Cleanup. holder() is actually the receiver.
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
- Handle<Map> transition, Handle<Name> name) {
- Label miss, slow;
-
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1(), &miss);
-
- // Check that we are allowed to write this.
- bool is_nonexistent = holder()->map() == transition->GetBackPointer();
- if (is_nonexistent) {
- // Find the top object.
- Handle<JSObject> last;
- PrototypeIterator iter(isolate(), holder());
- while (!iter.IsAtEnd()) {
- last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- iter.Advance();
- }
- if (!last.is_null()) set_holder(last);
- NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
- } else {
- FrontendHeader(receiver(), name, &miss);
- DCHECK(holder()->HasFastProperties());
- }
-
- GenerateStoreTransition(transition, name, receiver(), this->name(), value(),
- scratch1(), scratch2(), scratch3(), &miss, &slow);
-
- GenerateRestoreName(&miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- GenerateRestoreName(&slow, name);
- TailCallBuiltin(masm(), SlowBuiltin(kind()));
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupResult* lookup,
- Handle<Name> name) {
- Label miss;
- GenerateStoreField(lookup, value(), &miss);
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
- Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
- Frontend(receiver(), name);
- GenerateStoreViaSetter(masm(), type(), receiver(), setter);
-
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization) {
- Frontend(receiver(), name);
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), true, 1, values);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub;
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
- store_mode).GetCode();
- } else {
- stub = StoreElementStub(isolate(), is_jsarray, elements_kind, store_mode)
- .GetCode();
- }
-
- __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
-
- TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string());
-}
-
-
-#undef __
-
-
-void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
- Builtins::Name name) {
- Handle<Code> code(masm->isolate()->builtins()->builtin(name));
- GenerateTailCall(masm, code);
-}
-
-
-Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
- if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
- return load_calling_convention();
- }
- DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- return store_calling_convention();
-}
-
-
-Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
- Handle<Name> name,
- InlineCacheState state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
- Handle<Code> code = GetCodeWithFlags(flags, name);
- IC::RegisterWeakMapDependency(code);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
- return code;
-}
-
-
-Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
- return code;
-}
-
-
-void ElementHandlerCompiler::CompileElementHandlers(
- MapHandleList* receiver_maps, CodeHandleList* handlers) {
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map = receiver_maps->at(i);
- Handle<Code> cached_stub;
-
- if ((receiver_map->instance_type() & kNotStringTag) == 0) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_String();
- } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
- } else {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind) ||
- IsFixedTypedArrayElementsKind(elements_kind)) {
- cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind)
- .GetCode();
- } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
- } else {
- DCHECK(elements_kind == DICTIONARY_ELEMENTS);
- cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
- }
- }
-
- handlers->Add(cached_stub);
- }
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Code> cached_stub;
- Handle<Map> transitioned_map =
- receiver_map->FindTransitionedMap(receiver_maps);
-
- // TODO(mvstanton): The code below is doing pessimistic elements
- // transitions. I would like to stop doing that and rely on Allocation Site
- // Tracking to do a better job of ensuring the data types are what they need
- // to be. Not all the elements are in place yet, pessimistic elements
- // transitions are still important for performance.
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (!transitioned_map.is_null()) {
- cached_stub =
- ElementsTransitionAndStoreStub(isolate(), elements_kind,
- transitioned_map->elements_kind(),
- is_js_array, store_mode).GetCode();
- } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
- } else {
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- cached_stub = StoreFastElementStub(isolate(), is_js_array,
- elements_kind, store_mode).GetCode();
- } else {
- cached_stub = StoreElementStub(isolate(), is_js_array, elements_kind,
- store_mode).GetCode();
- }
- }
- DCHECK(!cached_stub.is_null());
- handlers.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
- }
-
- Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
- &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
- return code;
-}
-
-
-void ElementHandlerCompiler::GenerateStoreDictionaryElement(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
-CallOptimization::CallOptimization(LookupResult* lookup) {
- if (lookup->IsFound() &&
- lookup->IsCacheable() &&
- lookup->IsConstantFunction()) {
- // We only optimize constant function calls.
- Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
- } else {
- Initialize(Handle<JSFunction>::null());
- }
-}
-
-
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
- Initialize(function);
-}
-
-
-Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
- Handle<Map> object_map,
- HolderLookup* holder_lookup) const {
- DCHECK(is_simple_api_call());
- if (!object_map->IsJSObjectMap()) {
- *holder_lookup = kHolderNotFound;
- return Handle<JSObject>::null();
- }
- if (expected_receiver_type_.is_null() ||
- expected_receiver_type_->IsTemplateFor(*object_map)) {
- *holder_lookup = kHolderIsReceiver;
- return Handle<JSObject>::null();
- }
- while (true) {
- if (!object_map->prototype()->IsJSObject()) break;
- Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
- if (!prototype->map()->is_hidden_prototype()) break;
- object_map = handle(prototype->map());
- if (expected_receiver_type_->IsTemplateFor(*object_map)) {
- *holder_lookup = kHolderFound;
- return prototype;
- }
- }
- *holder_lookup = kHolderNotFound;
- return Handle<JSObject>::null();
-}
-
-
-bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
- Handle<JSObject> holder) const {
- DCHECK(is_simple_api_call());
- if (!receiver->IsJSObject()) return false;
- Handle<Map> map(JSObject::cast(*receiver)->map());
- HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- LookupHolderOfExpectedType(map, &holder_lookup);
- switch (holder_lookup) {
- case kHolderNotFound:
- return false;
- case kHolderIsReceiver:
- return true;
- case kHolderFound:
- if (api_holder.is_identical_to(holder)) return true;
- // Check if holder is in prototype chain of api_holder.
- {
- JSObject* object = *api_holder;
- while (true) {
- Object* prototype = object->map()->prototype();
- if (!prototype->IsJSObject()) return false;
- if (prototype == *holder) return true;
- object = JSObject::cast(prototype);
- }
- }
- break;
- }
- UNREACHABLE();
- return false;
-}
-
-
-void CallOptimization::Initialize(Handle<JSFunction> function) {
- constant_function_ = Handle<JSFunction>::null();
- is_simple_api_call_ = false;
- expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
- api_call_info_ = Handle<CallHandlerInfo>::null();
-
- if (function.is_null() || !function->is_compiled()) return;
-
- constant_function_ = function;
- AnalyzePossibleApiFunction(function);
-}
-
-
-void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
- if (!function->shared()->IsApiFunction()) return;
- Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
-
- // Require a C++ callback.
- if (info->call_code()->IsUndefined()) return;
- api_call_info_ =
- Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
-
- // Accept signatures that either have no restrictions at all or
- // only have restrictions on the receiver.
- if (!info->signature()->IsUndefined()) {
- Handle<SignatureInfo> signature =
- Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
- if (!signature->args()->IsUndefined()) return;
- if (!signature->receiver()->IsUndefined()) {
- expected_receiver_type_ =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(signature->receiver()));
- }
- }
-
- is_simple_api_call_ = true;
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
deleted file mode 100644
index 77bd14cba1..0000000000
--- a/deps/v8/src/stub-cache.h
+++ /dev/null
@@ -1,684 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
-
-#include "src/allocation.h"
-#include "src/arguments.h"
-#include "src/code-stubs.h"
-#include "src/ic-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects.h"
-#include "src/zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The stub cache is used for megamorphic property accesses.
-// It maps (map, name, type) to property access handlers. The cache does not
-// need explicit invalidation when a prototype chain is modified, since the
-// handlers verify the chain.
-
-
-class CallOptimization;
-class SmallMapList;
-class StubCache;
-
-
-class SCTableReference {
- public:
- Address address() const { return address_; }
-
- private:
- explicit SCTableReference(Address address) : address_(address) {}
-
- Address address_;
-
- friend class StubCache;
-};
-
-
-class StubCache {
- public:
- struct Entry {
- Name* key;
- Code* value;
- Map* map;
- };
-
- void Initialize();
- // Access cache for entry hash(name, map).
- Code* Set(Name* name, Map* map, Code* code);
- Code* Get(Name* name, Map* map, Code::Flags flags);
- // Clear the lookup table (@ mark compact collection).
- void Clear();
- // Collect all maps that match the name and flags.
- void CollectMatchingMaps(SmallMapList* types,
- Handle<Name> name,
- Code::Flags flags,
- Handle<Context> native_context,
- Zone* zone);
- // Generate code for probing the stub cache table.
- // Arguments extra, extra2 and extra3 may be used to pass additional scratch
- // registers. Set to no_reg if not needed.
- void GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2 = no_reg,
- Register extra3 = no_reg);
-
- enum Table {
- kPrimary,
- kSecondary
- };
-
- SCTableReference key_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->key));
- }
-
- SCTableReference map_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->map));
- }
-
- SCTableReference value_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->value));
- }
-
- StubCache::Entry* first_entry(StubCache::Table table) {
- switch (table) {
- case StubCache::kPrimary: return StubCache::primary_;
- case StubCache::kSecondary: return StubCache::secondary_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Isolate* isolate() { return isolate_; }
-
- // Setting the entry size such that the index is shifted by Name::kHashShift
- // is convenient; shifting down the length field (to extract the hash code)
- // automatically discards the hash bit field.
- static const int kCacheIndexShift = Name::kHashShift;
-
- private:
- explicit StubCache(Isolate* isolate);
-
- // The stub cache has a primary and secondary level. The two levels have
- // different hashing algorithms in order to avoid simultaneous collisions
- // in both caches. Unlike a probing strategy (quadratic or otherwise) the
- // update strategy on updates is fairly clear and simple: Any existing entry
- // in the primary cache is moved to the secondary cache, and secondary cache
- // entries are overwritten.
-
- // Hash algorithm for the primary table. This algorithm is replicated in
- // assembler for every architecture. Returns an index into the table that
- // is scaled by 1 << kCacheIndexShift.
- static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
- STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
- // Compute the hash of the name (use entire hash field).
- DCHECK(name->HasHashCode());
- uint32_t field = name->hash_field();
- // Using only the low bits in 64-bit mode is unlikely to increase the
- // risk of collision even if the heap is spread over an area larger than
- // 4Gb (and not at all if it isn't).
- uint32_t map_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- // Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (map_low32bits + field) ^ iflags;
- return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
- }
-
- // Hash algorithm for the secondary table. This algorithm is replicated in
- // assembler for every architecture. Returns an index into the table that
- // is scaled by 1 << kCacheIndexShift.
- static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
- // Use the seed from the primary cache in the secondary cache.
- uint32_t name_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- uint32_t key = (seed - name_low32bits) + iflags;
- return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
- }
-
- // Compute the entry for a given offset in exactly the same way as
- // we do in generated code. We generate an hash code that already
- // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
- // of sizeof(Entry). This makes it easier to avoid making mistakes
- // in the hashed offset computations.
- static Entry* entry(Entry* table, int offset) {
- const int multiplier = sizeof(*table) >> Name::kHashShift;
- return reinterpret_cast<Entry*>(
- reinterpret_cast<Address>(table) + offset * multiplier);
- }
-
- static const int kPrimaryTableBits = 11;
- static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
- static const int kSecondaryTableBits = 9;
- static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
- Entry primary_[kPrimaryTableSize];
- Entry secondary_[kSecondaryTableSize];
- Isolate* isolate_;
-
- friend class Isolate;
- friend class SCTableReference;
-
- DISALLOW_COPY_AND_ASSIGN(StubCache);
-};
-
-
-// ------------------------------------------------------------------------
-
-
-// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
-
-
-// Support functions for IC stubs for interceptors.
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor);
-
-
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
-enum IcCheckType { ELEMENT, PROPERTY };
-
-
-class PropertyAccessCompiler BASE_EMBEDDED {
- public:
- static Builtins::Name MissBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::LOAD_IC:
- return Builtins::kLoadIC_Miss;
- case Code::STORE_IC:
- return Builtins::kStoreIC_Miss;
- case Code::KEYED_LOAD_IC:
- return Builtins::kKeyedLoadIC_Miss;
- case Code::KEYED_STORE_IC:
- return Builtins::kKeyedStoreIC_Miss;
- default:
- UNREACHABLE();
- }
- return Builtins::kLoadIC_Miss;
- }
-
- static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
-
- protected:
- PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
- CacheHolderFlag cache_holder)
- : registers_(GetCallingConvention(kind)),
- kind_(kind),
- cache_holder_(cache_holder),
- isolate_(isolate),
- masm_(isolate, NULL, 256) {}
-
- Code::Kind kind() const { return kind_; }
- CacheHolderFlag cache_holder() const { return cache_holder_; }
- MacroAssembler* masm() { return &masm_; }
- Isolate* isolate() const { return isolate_; }
- Heap* heap() const { return isolate()->heap(); }
- Factory* factory() const { return isolate()->factory(); }
-
- Register receiver() const { return registers_[0]; }
- Register name() const { return registers_[1]; }
- Register scratch1() const { return registers_[2]; }
- Register scratch2() const { return registers_[3]; }
- Register scratch3() const { return registers_[4]; }
-
- // Calling convention between indexed store IC and handler.
- Register transition_map() const { return scratch1(); }
-
- static Register* GetCallingConvention(Code::Kind);
- static Register* load_calling_convention();
- static Register* store_calling_convention();
- static Register* keyed_store_calling_convention();
-
- Register* registers_;
-
- static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
-
- Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
- Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
-
- private:
- Code::Kind kind_;
- CacheHolderFlag cache_holder_;
-
- Isolate* isolate_;
- MacroAssembler masm_;
-};
-
-
-class PropertyICCompiler : public PropertyAccessCompiler {
- public:
- // Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state);
-
- // Named
- static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state,
- ExtraICState extra_state);
- static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
- ExtraICState extra_state);
-
- static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
- Handle<HeapType> type,
- Handle<Code> handler,
- ExtraICState extra_ic_state);
- static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- ExtraICState extra_ic_state);
-
- // Keyed
- static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
-
- static Handle<Code> ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, StrictMode strict_mode,
- KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
- static Handle<Code> ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- StrictMode strict_mode);
-
- // Compare nil
- static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub* stub);
-
-
- private:
- PropertyICCompiler(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag cache_holder = kCacheOnReceiver)
- : PropertyAccessCompiler(isolate, kind, cache_holder),
- extra_ic_state_(extra_ic_state) {}
-
- static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
- Code::Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag cache_holder = kCacheOnReceiver);
-
- Handle<Code> CompileLoadInitialize(Code::Flags flags);
- Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
- Handle<Code> CompileStoreInitialize(Code::Flags flags);
- Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
- Handle<Code> CompileStoreGeneric(Code::Flags flags);
- Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
-
- Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler,
- Handle<Name> name, IcCheckType check);
- Handle<Code> CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers, Handle<Name> name,
- Code::StubType type, IcCheckType check);
-
- Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps);
-
- bool IncludesNumberType(TypeHandleList* types);
-
- Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
- InlineCacheState state = MONOMORPHIC);
-
- Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (kind() == Code::LOAD_IC) {
- return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
- : Logger::LOAD_POLYMORPHIC_IC_TAG;
- } else if (kind() == Code::KEYED_LOAD_IC) {
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_LOAD_IC_TAG
- : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
- } else if (kind() == Code::STORE_IC) {
- return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
- : Logger::STORE_POLYMORPHIC_IC_TAG;
- } else {
- DCHECK_EQ(Code::KEYED_STORE_IC, kind());
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_STORE_IC_TAG
- : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
- }
- }
-
- const ExtraICState extra_ic_state_;
-};
-
-
-class PropertyHandlerCompiler : public PropertyAccessCompiler {
- public:
- static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
- CacheHolderFlag cache_holder, Code::StubType type);
-
- protected:
- PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
- Handle<HeapType> type, Handle<JSObject> holder,
- CacheHolderFlag cache_holder)
- : PropertyAccessCompiler(isolate, kind, cache_holder),
- type_(type),
- holder_(holder) {}
-
- virtual ~PropertyHandlerCompiler() {}
-
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss) {
- UNREACHABLE();
- return receiver();
- }
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
-
- Register Frontend(Register object_reg, Handle<Name> name);
- void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
- Register scratch1, Register scratch2);
-
- // TODO(verwaest): Make non-static.
- static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver,
- Register scratch, bool is_store, int argc,
- Register* values);
-
- // Helper function used to check that the dictionary doesn't contain
- // the property. This function may return false negatives, so miss_label
- // must always call a backup property check that is complete.
- // This function is safe to call if the receiver has fast properties.
- // Name must be unique and receiver must be a heap object.
- static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register r0,
- Register r1);
-
- // Generate code to check that a global property cell is empty. Create
- // the property cell at compilation time if no cell exists for the
- // property.
- static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<JSGlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss);
-
- // Generates code that verifies that the property holder has not changed
- // (checking maps of objects in the prototype chain for fast and global
- // objects or doing negative lookup for slow objects, ensures that the
- // property cells for global objects are still empty) and checks that the map
- // of the holder has not changed. If necessary the function also generates
- // code for security check in case of global object holders. Helps to make
- // sure that the current IC is still valid.
- //
- // The scratch and holder registers are always clobbered, but the object
- // register is only clobbered if it the same as the holder register. The
- // function returns a register containing the holder - either object_reg or
- // holder_reg.
- Register CheckPrototypes(Register object_reg, Register holder_reg,
- Register scratch1, Register scratch2,
- Handle<Name> name, Label* miss,
- PrototypeCheckType check = CHECK_ALL_MAPS);
-
- Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
- void set_type_for_object(Handle<Object> object) {
- type_ = IC::CurrentTypeOf(object, isolate());
- }
- void set_holder(Handle<JSObject> holder) { holder_ = holder; }
- Handle<HeapType> type() const { return type_; }
- Handle<JSObject> holder() const { return holder_; }
-
- private:
- Handle<HeapType> type_;
- Handle<JSObject> holder_;
-};
-
-
-class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
- public:
- NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
- Handle<JSObject> holder,
- CacheHolderFlag cache_holder)
- : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder,
- cache_holder) {}
-
- virtual ~NamedLoadHandlerCompiler() {}
-
- Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
-
- Handle<Code> CompileLoadCallback(Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> CompileLoadCallback(Handle<Name> name,
- const CallOptimization& call_optimization);
-
- Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
-
- Handle<Code> CompileLoadInterceptor(Handle<Name> name);
-
- Handle<Code> CompileLoadViaGetter(Handle<Name> name,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
- bool is_configurable);
-
- // Static interface
- static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
- Handle<HeapType> type);
-
- static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type,
- Register receiver,
- Handle<JSFunction> getter);
-
- static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
- GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg,
- Handle<JSFunction>());
- }
-
- static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- // These constants describe the structure of the interceptor arguments on the
- // stack. The arguments are pushed by the (platform-specific)
- // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
- // LoadWithInterceptor.
- static const int kInterceptorArgsNameIndex = 0;
- static const int kInterceptorArgsInfoIndex = 1;
- static const int kInterceptorArgsThisIndex = 2;
- static const int kInterceptorArgsHolderIndex = 3;
- static const int kInterceptorArgsLength = 4;
-
- protected:
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss);
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss);
-
- private:
- Handle<Code> CompileLoadNonexistent(Handle<Name> name);
- void GenerateLoadConstant(Handle<Object> value);
- void GenerateLoadCallback(Register reg,
- Handle<ExecutableAccessorInfo> callback);
- void GenerateLoadCallback(const CallOptimization& call_optimization,
- Handle<Map> receiver_map);
- void GenerateLoadInterceptor(Register holder_reg,
- LookupResult* lookup,
- Handle<Name> name);
- void GenerateLoadPostInterceptor(Register reg,
- Handle<Name> name,
- LookupResult* lookup);
-
- // Generates prototype loading code that uses the objects from the
- // context we were in when this function was called. If the context
- // has changed, a jump to miss is performed. This ties the generated
- // code to a particular context and so must not be used in cases
- // where the generated code is not allowed to have references to
- // objects from a context.
- static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss);
-
-
- Register scratch4() { return registers_[5]; }
-};
-
-
-class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
- public:
- explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
- Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder,
- kCacheOnReceiver) {}
-
- virtual ~NamedStoreHandlerCompiler() {}
-
- Handle<Code> CompileStoreTransition(Handle<Map> transition,
- Handle<Name> name);
- Handle<Code> CompileStoreField(LookupResult* lookup, Handle<Name> name);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization);
- Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
- Handle<JSFunction> setter);
- Handle<Code> CompileStoreInterceptor(Handle<Name> name);
-
- static void GenerateStoreViaSetter(MacroAssembler* masm,
- Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter);
-
- static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
- GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg,
- Handle<JSFunction>());
- }
-
- protected:
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss);
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss);
- void GenerateRestoreName(Label* label, Handle<Name> name);
-
- private:
- void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name,
- Register receiver_reg, Register name_reg,
- Register value_reg, Register scratch1,
- Register scratch2, Register scratch3,
- Label* miss_label, Label* slow);
-
- void GenerateStoreField(LookupResult* lookup, Register value_reg,
- Label* miss_label);
-
- static Builtins::Name SlowBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::STORE_IC: return Builtins::kStoreIC_Slow;
- case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
- default: UNREACHABLE();
- }
- return Builtins::kStoreIC_Slow;
- }
-
- static Register value();
-};
-
-
-class ElementHandlerCompiler : public PropertyHandlerCompiler {
- public:
- explicit ElementHandlerCompiler(Isolate* isolate)
- : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC,
- Handle<HeapType>::null(),
- Handle<JSObject>::null(), kCacheOnReceiver) {}
-
- virtual ~ElementHandlerCompiler() {}
-
- void CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers);
-
- static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- static void GenerateStoreDictionaryElement(MacroAssembler* masm);
-};
-
-
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
- explicit CallOptimization(LookupResult* lookup);
-
- explicit CallOptimization(Handle<JSFunction> function);
-
- bool is_constant_call() const {
- return !constant_function_.is_null();
- }
-
- Handle<JSFunction> constant_function() const {
- DCHECK(is_constant_call());
- return constant_function_;
- }
-
- bool is_simple_api_call() const {
- return is_simple_api_call_;
- }
-
- Handle<FunctionTemplateInfo> expected_receiver_type() const {
- DCHECK(is_simple_api_call());
- return expected_receiver_type_;
- }
-
- Handle<CallHandlerInfo> api_call_info() const {
- DCHECK(is_simple_api_call());
- return api_call_info_;
- }
-
- enum HolderLookup {
- kHolderNotFound,
- kHolderIsReceiver,
- kHolderFound
- };
- Handle<JSObject> LookupHolderOfExpectedType(
- Handle<Map> receiver_map,
- HolderLookup* holder_lookup) const;
-
- // Check if the api holder is between the receiver and the holder.
- bool IsCompatibleReceiver(Handle<Object> receiver,
- Handle<JSObject> holder) const;
-
- private:
- void Initialize(Handle<JSFunction> function);
-
- // Determines whether the given function can be called using the
- // fast api call builtin.
- void AnalyzePossibleApiFunction(Handle<JSFunction> function);
-
- Handle<JSFunction> constant_function_;
- bool is_simple_api_call_;
- Handle<FunctionTemplateInfo> expected_receiver_type_;
- Handle<CallHandlerInfo> api_call_info_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_STUB_CACHE_H_
diff --git a/deps/v8/src/test/DEPS b/deps/v8/src/test/DEPS
new file mode 100644
index 0000000000..13855ec9d9
--- /dev/null
+++ b/deps/v8/src/test/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/libplatform/libplatform.h"
+]
diff --git a/deps/v8/src/test/run-all-unittests.cc b/deps/v8/src/test/run-all-unittests.cc
new file mode 100644
index 0000000000..8c361ddc36
--- /dev/null
+++ b/deps/v8/src/test/run-all-unittests.cc
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+class DefaultPlatformEnvironment FINAL : public ::testing::Environment {
+ public:
+ DefaultPlatformEnvironment() : platform_(NULL) {}
+ ~DefaultPlatformEnvironment() {}
+
+ virtual void SetUp() OVERRIDE {
+ EXPECT_EQ(NULL, platform_);
+ platform_ = v8::platform::CreateDefaultPlatform();
+ ASSERT_TRUE(platform_ != NULL);
+ v8::V8::InitializePlatform(platform_);
+ ASSERT_TRUE(v8::V8::Initialize());
+ }
+
+ virtual void TearDown() OVERRIDE {
+ ASSERT_TRUE(platform_ != NULL);
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
+ delete platform_;
+ platform_ = NULL;
+ }
+
+ private:
+ v8::Platform* platform_;
+};
+
+} // namespace
+
+
+int main(int argc, char** argv) {
+ testing::InitGoogleMock(&argc, argv);
+ testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ return RUN_ALL_TESTS();
+}
diff --git a/deps/v8/src/test/test-utils.cc b/deps/v8/src/test/test-utils.cc
new file mode 100644
index 0000000000..104146598c
--- /dev/null
+++ b/deps/v8/src/test/test-utils.cc
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/test/test-utils.h"
+
+#include "src/isolate-inl.h"
+
+namespace v8 {
+
+// static
+Isolate* TestWithIsolate::isolate_ = NULL;
+
+
+TestWithIsolate::TestWithIsolate()
+ : isolate_scope_(isolate()), handle_scope_(isolate()) {}
+
+
+TestWithIsolate::~TestWithIsolate() {}
+
+
+// static
+void TestWithIsolate::SetUpTestCase() {
+ Test::SetUpTestCase();
+ EXPECT_EQ(NULL, isolate_);
+ isolate_ = v8::Isolate::New();
+ EXPECT_TRUE(isolate_ != NULL);
+}
+
+
+// static
+void TestWithIsolate::TearDownTestCase() {
+ ASSERT_TRUE(isolate_ != NULL);
+ isolate_->Dispose();
+ isolate_ = NULL;
+ Test::TearDownTestCase();
+}
+
+
+TestWithContext::TestWithContext()
+ : context_(Context::New(isolate())), context_scope_(context_) {}
+
+
+TestWithContext::~TestWithContext() {}
+
+
+namespace internal {
+
+TestWithIsolate::~TestWithIsolate() {}
+
+
+Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
+
+
+TestWithZone::~TestWithZone() {}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/test/test-utils.h b/deps/v8/src/test/test-utils.h
new file mode 100644
index 0000000000..05d1ea641d
--- /dev/null
+++ b/deps/v8/src/test/test-utils.h
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_TEST_UTILS_H_
+#define V8_TEST_TEST_UTILS_H_
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/zone.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+
+class TestWithIsolate : public ::testing::Test {
+ public:
+ TestWithIsolate();
+ virtual ~TestWithIsolate();
+
+ Isolate* isolate() const { return isolate_; }
+
+ static void SetUpTestCase();
+ static void TearDownTestCase();
+
+ private:
+ static Isolate* isolate_;
+ Isolate::Scope isolate_scope_;
+ HandleScope handle_scope_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+};
+
+
+class TestWithContext : public virtual TestWithIsolate {
+ public:
+ TestWithContext();
+ virtual ~TestWithContext();
+
+ const Local<Context>& context() const { return context_; }
+
+ private:
+ Local<Context> context_;
+ Context::Scope context_scope_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithContext);
+};
+
+
+namespace internal {
+
+// Forward declarations.
+class Factory;
+
+
+class TestWithIsolate : public virtual ::v8::TestWithIsolate {
+ public:
+ TestWithIsolate() {}
+ virtual ~TestWithIsolate();
+
+ Factory* factory() const;
+ Isolate* isolate() const {
+ return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+};
+
+
+class TestWithZone : public TestWithIsolate {
+ public:
+ TestWithZone() : zone_(isolate()) {}
+ virtual ~TestWithZone();
+
+ Zone* zone() { return &zone_; }
+
+ private:
+ Zone zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithZone);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_TEST_UTILS_H_
diff --git a/deps/v8/src/test/test.gyp b/deps/v8/src/test/test.gyp
new file mode 100644
index 0000000000..f4c6a5e469
--- /dev/null
+++ b/deps/v8/src/test/test.gyp
@@ -0,0 +1,71 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'run-all-unittests',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ '../../testing/gmock.gyp:gmock',
+ '../../testing/gtest.gyp:gtest',
+ '../../tools/gyp/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'run-all-unittests.cc',
+ 'test-utils.h',
+ 'test-utils.cc',
+ ],
+ 'export_dependent_settings': [
+ '../../testing/gmock.gyp:gmock',
+ '../../testing/gtest.gyp:gtest',
+ ],
+ 'conditions': [
+ ['component=="shared_library"', {
+ # compiler-unittests can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ 'conditions': [
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
+ },
+ {
+ 'dependencies': [
+ '../../tools/gyp/v8.gyp:v8_nosnapshot',
+ ],
+ }],
+ ],
+ }, {
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ }],
+ ['os_posix == 1', {
+ # TODO(svenpanne): This is a temporary work-around to fix the warnings
+ # that show up because we use -std=gnu++0x instead of -std=c++11.
+ 'cflags!': [
+ '-pedantic',
+ ],
+ 'direct_dependent_settings': {
+ 'cflags!': [
+ '-pedantic',
+ ],
+ },
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/src/third_party/vtune/DEPS b/deps/v8/src/third_party/vtune/DEPS
new file mode 100644
index 0000000000..adbe86ecf6
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+../../../include",
+]
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
index c60b303b3a..a7e5116604 100644
--- a/deps/v8/src/third_party/vtune/v8-vtune.h
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -58,9 +58,11 @@
#ifndef V8_VTUNE_H_
#define V8_VTUNE_H_
+#include "../../../include/v8.h"
+
namespace vTune {
-void InitializeVtuneForV8();
+void InitializeVtuneForV8(v8::Isolate::CreateParams& params);
} // namespace vTune
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index d62dcfbb71..e489d6e215 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -271,13 +271,10 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
} // namespace internal
-void InitializeVtuneForV8() {
- if (v8::V8::Initialize()) {
- v8::V8::SetFlagsFromString("--nocompact_code_space",
- (int)strlen("--nocompact_code_space"));
- v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
- vTune::internal::VTUNEJITInterface::event_handler);
- }
+void InitializeVtuneForV8(v8::Isolate::CreateParams& params) {
+ v8::V8::SetFlagsFromString("--nocompact_code_space",
+ (int)strlen("--nocompact_code_space"));
+ params.code_event_handler = vTune::internal::VTUNEJITInterface::event_handler;
}
} // namespace vTune
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 3535e343af..9c719b827e 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -148,11 +148,15 @@ namespace internal {
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
T(FUTURE_RESERVED_WORD, NULL, 0) \
T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \
+ K(CLASS, "class", 0) \
K(CONST, "const", 0) \
K(EXPORT, "export", 0) \
+ K(EXTENDS, "extends", 0) \
K(IMPORT, "import", 0) \
K(LET, "let", 0) \
+ K(STATIC, "static", 0) \
K(YIELD, "yield", 0) \
+ K(SUPER, "super", 0) \
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
new file mode 100644
index 0000000000..43e768ee5d
--- /dev/null
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPE_FEEDBACK_VECTOR_INL_H_
+#define V8_TYPE_FEEDBACK_VECTOR_INL_H_
+
+#include "src/type-feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->uninitialized_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->megamorphic_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->megamorphic_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::GenericSentinel(Isolate* isolate) {
+ return isolate->factory()->generic_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::MonomorphicArraySentinel(
+ Isolate* isolate, ElementsKind elements_kind) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
+}
+
+
+Object* TypeFeedbackVector::RawUninitializedSentinel(Heap* heap) {
+ return heap->uninitialized_symbol();
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TYPE_FEEDBACK_VECTOR_INL_H_
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
new file mode 100644
index 0000000000..a3fe0707c7
--- /dev/null
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/objects.h"
+#include "src/type-feedback-vector-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
+ Isolate* isolate, Handle<TypeFeedbackVector> vector) {
+ Handle<TypeFeedbackVector> result;
+ result = Handle<TypeFeedbackVector>::cast(
+ isolate->factory()->CopyFixedArray(Handle<FixedArray>::cast(vector)));
+ return result;
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
new file mode 100644
index 0000000000..b6fadba73e
--- /dev/null
+++ b/deps/v8/src/type-feedback-vector.h
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPE_FEEDBACK_VECTOR_H_
+#define V8_TYPE_FEEDBACK_VECTOR_H_
+
+#include "src/checks.h"
+#include "src/elements-kind.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class TypeFeedbackVector : public FixedArray {
+ public:
+ // Casting.
+ static TypeFeedbackVector* cast(Object* obj) {
+ DCHECK(obj->IsTypeFeedbackVector());
+ return reinterpret_cast<TypeFeedbackVector*>(obj);
+ }
+
+ static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
+ Handle<TypeFeedbackVector> vector);
+
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a premonomorphic state.
+ static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a generic state.
+ static inline Handle<Object> GenericSentinel(Isolate* isolate);
+
+ // The object that indicates a monomorphic state of Array with
+ // ElementsKind
+ static inline Handle<Object> MonomorphicArraySentinel(
+ Isolate* isolate, ElementsKind elements_kind);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Object* RawUninitializedSentinel(Heap* heap);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_TRANSITIONS_H_
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 1d2db7c548..cf3950f4f0 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -7,31 +7,28 @@
#include "src/ast.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
-#include "src/ic.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
-#include "src/stub-cache.h"
#include "src/type-info.h"
-#include "src/ic-inl.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<FixedArray> feedback_vector,
- Handle<Context> native_context,
- Zone* zone)
- : native_context_(native_context),
- zone_(zone) {
+TypeFeedbackOracle::TypeFeedbackOracle(
+ Handle<Code> code, Handle<TypeFeedbackVector> feedback_vector,
+ Handle<Context> native_context, Zone* zone)
+ : native_context_(native_context), zone_(zone) {
BuildDictionary(code);
DCHECK(dictionary_->IsDictionary());
// We make a copy of the feedback vector because a GC could clear
// the type feedback info contained therein.
// TODO(mvstanton): revisit the decision to copy when we weakly
// traverse the feedback vector at GC time.
- feedback_vector_ = isolate()->factory()->CopyFixedArray(feedback_vector);
+ feedback_vector_ = TypeFeedbackVector::Copy(isolate(), feedback_vector);
}
@@ -112,8 +109,9 @@ bool TypeFeedbackOracle::CallNewIsMonomorphic(int slot) {
byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
Handle<Object> value = GetInfo(feedback_vector_slot);
return value.is_identical_to(
- TypeFeedbackInfo::UninitializedSentinel(isolate()))
- ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
+ TypeFeedbackVector::UninitializedSentinel(isolate()))
+ ? ForInStatement::FAST_FOR_IN
+ : ForInStatement::SLOW_FOR_IN;
}
@@ -197,8 +195,10 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
}
if (code->is_compare_ic_stub()) {
- CompareIC::StubInfoToType(code->stub_key(), left_type, right_type,
- combined_type, map, zone());
+ CompareICStub stub(code->stub_key(), isolate());
+ *left_type = CompareICState::StateToType(zone(), stub.left());
+ *right_type = CompareICState::StateToType(zone(), stub.right());
+ *combined_type = CompareICState::StateToType(zone(), stub.state(), map);
} else if (code->is_compare_nil_ic_stub()) {
CompareNilICStub stub(isolate(), code->extra_ic_state());
*combined_type = stub.GetType(zone(), map);
@@ -218,8 +218,8 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
if (!object->IsCode()) {
// For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
// operations covered by the BinaryOpIC we should always have them.
- DCHECK(op < BinaryOpIC::State::FIRST_TOKEN ||
- op > BinaryOpIC::State::LAST_TOKEN);
+ DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
+ op > BinaryOpICState::LAST_TOKEN);
*left = *right = *result = Type::None(zone());
*fixed_right_arg = Maybe<int>();
*allocation_site = Handle<AllocationSite>::null();
@@ -227,7 +227,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
}
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(isolate(), code->extra_ic_state());
+ BinaryOpICState state(isolate(), code->extra_ic_state());
DCHECK_EQ(op, state.op());
*left = state.GetLeftType(zone());
@@ -249,7 +249,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
if (!object->IsCode()) return Type::None(zone());
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(isolate(), code->extra_ic_state());
+ BinaryOpICState state(isolate(), code->extra_ic_state());
return state.GetLeftType(zone());
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 44fecf634c..434ddd6759 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -20,9 +20,8 @@ class SmallMapList;
class TypeFeedbackOracle: public ZoneObject {
public:
TypeFeedbackOracle(Handle<Code> code,
- Handle<FixedArray> feedback_vector,
- Handle<Context> native_context,
- Zone* zone);
+ Handle<TypeFeedbackVector> feedback_vector,
+ Handle<Context> native_context, Zone* zone);
bool LoadIsUninitialized(TypeFeedbackId id);
bool StoreIsUninitialized(TypeFeedbackId id);
@@ -120,7 +119,7 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Context> native_context_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
- Handle<FixedArray> feedback_vector_;
+ Handle<TypeFeedbackVector> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/deps/v8/src/types-inl.h b/deps/v8/src/types-inl.h
index f102ae3e13..162e658d67 100644
--- a/deps/v8/src/types-inl.h
+++ b/deps/v8/src/types-inl.h
@@ -70,7 +70,7 @@ T* ZoneTypeConfig::cast(Type* type) {
// static
bool ZoneTypeConfig::is_bitset(Type* type) {
- return reinterpret_cast<intptr_t>(type) & 1;
+ return reinterpret_cast<uintptr_t>(type) & 1;
}
@@ -87,9 +87,9 @@ bool ZoneTypeConfig::is_class(Type* type) {
// static
-int ZoneTypeConfig::as_bitset(Type* type) {
+ZoneTypeConfig::Type::bitset ZoneTypeConfig::as_bitset(Type* type) {
DCHECK(is_bitset(type));
- return static_cast<int>(reinterpret_cast<intptr_t>(type) >> 1);
+ return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type) ^ 1u);
}
@@ -108,13 +108,14 @@ i::Handle<i::Map> ZoneTypeConfig::as_class(Type* type) {
// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset) {
- return reinterpret_cast<Type*>((bitset << 1) | 1);
+ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(Type::bitset bitset) {
+ return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset | 1u));
}
// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset, Zone* Zone) {
+ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(
+ Type::bitset bitset, Zone* Zone) {
return from_bitset(bitset);
}
@@ -229,8 +230,9 @@ bool HeapTypeConfig::is_struct(Type* type, int tag) {
// static
-int HeapTypeConfig::as_bitset(Type* type) {
- return i::Smi::cast(type)->value();
+HeapTypeConfig::Type::bitset HeapTypeConfig::as_bitset(Type* type) {
+ // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
+ return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type));
}
@@ -247,14 +249,15 @@ i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::as_struct(Type* type) {
// static
-HeapTypeConfig::Type* HeapTypeConfig::from_bitset(int bitset) {
- return Type::cast(i::Smi::FromInt(bitset));
+HeapTypeConfig::Type* HeapTypeConfig::from_bitset(Type::bitset bitset) {
+ // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
+ return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset));
}
// static
i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_bitset(
- int bitset, Isolate* isolate) {
+ Type::bitset bitset, Isolate* isolate) {
return i::handle(from_bitset(bitset), isolate);
}
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index db92f30c47..c3184c6844 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -11,185 +11,181 @@ namespace v8 {
namespace internal {
-// -----------------------------------------------------------------------------
-// Range-related custom order on doubles.
-// We want -0 to be less than +0.
+// NOTE: If code is marked as being a "shortcut", this means that removing
+// the code won't affect the semantics of the surrounding function definition.
-static bool dle(double x, double y) {
- return x <= y && (x != 0 || IsMinusZero(x) || !IsMinusZero(y));
-}
+// -----------------------------------------------------------------------------
+// Range-related helper functions.
-static bool deq(double x, double y) {
- return dle(x, y) && dle(y, x);
+// The result may be invalid (max < min).
+template<class Config>
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Intersect(
+ Limits lhs, Limits rhs) {
+ DisallowHeapAllocation no_allocation;
+ Limits result(lhs);
+ if (lhs.min->Number() < rhs.min->Number()) result.min = rhs.min;
+ if (lhs.max->Number() > rhs.max->Number()) result.max = rhs.max;
+ return result;
}
-// -----------------------------------------------------------------------------
-// Glb and lub computation.
-
-// The largest bitset subsumed by this type.
template<class Config>
-int TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Union(
+ Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
- if (type->IsBitset()) {
- return type->AsBitset();
- } else if (type->IsUnion()) {
- UnionHandle unioned = handle(type->AsUnion());
- DCHECK(unioned->Wellformed());
- return unioned->Get(0)->BitsetGlb(); // Other BitsetGlb's are kNone anyway.
- } else {
- return kNone;
- }
+ Limits result(lhs);
+ if (lhs.min->Number() > rhs.min->Number()) result.min = rhs.min;
+ if (lhs.max->Number() < rhs.max->Number()) result.max = rhs.max;
+ return result;
}
-// The smallest bitset subsuming this type.
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
+bool TypeImpl<Config>::Overlap(
+ typename TypeImpl<Config>::RangeType* lhs,
+ typename TypeImpl<Config>::RangeType* rhs) {
DisallowHeapAllocation no_allocation;
- if (type->IsBitset()) {
- return type->AsBitset();
- } else if (type->IsUnion()) {
- UnionHandle unioned = handle(type->AsUnion());
- int bitset = kNone;
- for (int i = 0; i < unioned->Length(); ++i) {
- bitset |= unioned->Get(i)->BitsetLub();
- }
- return bitset;
- } else if (type->IsClass()) {
- // Little hack to avoid the need for a region for handlification here...
- return Config::is_class(type) ? Lub(*Config::as_class(type)) :
- type->AsClass()->Bound(NULL)->AsBitset();
- } else if (type->IsConstant()) {
- return type->AsConstant()->Bound()->AsBitset();
- } else if (type->IsRange()) {
- return type->AsRange()->Bound()->AsBitset();
- } else if (type->IsContext()) {
- return type->AsContext()->Bound()->AsBitset();
- } else if (type->IsArray()) {
- return type->AsArray()->Bound()->AsBitset();
- } else if (type->IsFunction()) {
- return type->AsFunction()->Bound()->AsBitset();
- } else {
- UNREACHABLE();
- return kNone;
- }
+ typename TypeImpl<Config>::Limits lim = Intersect(Limits(lhs), Limits(rhs));
+ return lim.min->Number() <= lim.max->Number();
}
-// The smallest bitset subsuming this type, ignoring explicit bounds.
template<class Config>
-int TypeImpl<Config>::BitsetType::InherentLub(TypeImpl* type) {
+bool TypeImpl<Config>::Contains(
+ typename TypeImpl<Config>::RangeType* lhs,
+ typename TypeImpl<Config>::RangeType* rhs) {
DisallowHeapAllocation no_allocation;
- if (type->IsBitset()) {
- return type->AsBitset();
- } else if (type->IsUnion()) {
- UnionHandle unioned = handle(type->AsUnion());
- int bitset = kNone;
- for (int i = 0; i < unioned->Length(); ++i) {
- bitset |= unioned->Get(i)->InherentBitsetLub();
- }
- return bitset;
- } else if (type->IsClass()) {
- return Lub(*type->AsClass()->Map());
- } else if (type->IsConstant()) {
- return Lub(*type->AsConstant()->Value());
- } else if (type->IsRange()) {
- return Lub(type->AsRange()->Min(), type->AsRange()->Max());
- } else if (type->IsContext()) {
- return kInternal & kTaggedPtr;
- } else if (type->IsArray()) {
- return kArray;
- } else if (type->IsFunction()) {
- return kFunction;
- } else {
- UNREACHABLE();
- return kNone;
- }
+ return lhs->Min()->Number() <= rhs->Min()->Number()
+ && rhs->Max()->Number() <= lhs->Max()->Number();
}
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
+bool TypeImpl<Config>::Contains(
+ typename TypeImpl<Config>::RangeType* range, i::Object* val) {
DisallowHeapAllocation no_allocation;
- if (value->IsNumber()) {
- return Lub(value->Number()) & (value->IsSmi() ? kTaggedInt : kTaggedPtr);
- }
- return Lub(i::HeapObject::cast(value)->map());
+ return IsInteger(val)
+ && range->Min()->Number() <= val->Number()
+ && val->Number() <= range->Max()->Number();
}
+// -----------------------------------------------------------------------------
+// Min and Max computation.
+
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(double value) {
- DisallowHeapAllocation no_allocation;
- if (i::IsMinusZero(value)) return kMinusZero;
- if (std::isnan(value)) return kNaN;
- if (IsUint32Double(value)) return Lub(FastD2UI(value));
- if (IsInt32Double(value)) return Lub(FastD2I(value));
- return kOtherNumber;
+double TypeImpl<Config>::Min() {
+ DCHECK(this->Is(Number()));
+ if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
+ if (this->IsUnion()) {
+ double min = +V8_INFINITY;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ min = std::min(min, this->AsUnion()->Get(i)->Min());
+ }
+ return min;
+ }
+ if (this->IsRange()) return this->AsRange()->Min()->Number();
+ if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+ UNREACHABLE();
+ return 0;
}
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(double min, double max) {
- DisallowHeapAllocation no_allocation;
- DCHECK(dle(min, max));
- if (deq(min, max)) return BitsetType::Lub(min); // Singleton range.
- int bitset = BitsetType::kNumber ^ SEMANTIC(BitsetType::kNaN);
- if (dle(0, min) || max < 0) bitset ^= SEMANTIC(BitsetType::kMinusZero);
- return bitset;
- // TODO(neis): Could refine this further by doing more checks on min/max.
+double TypeImpl<Config>::Max() {
+ DCHECK(this->Is(Number()));
+ if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
+ if (this->IsUnion()) {
+ double max = -V8_INFINITY;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ max = std::max(max, this->AsUnion()->Get(i)->Max());
+ }
+ return max;
+ }
+ if (this->IsRange()) return this->AsRange()->Max()->Number();
+ if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+ UNREACHABLE();
+ return 0;
}
+// -----------------------------------------------------------------------------
+// Glb and lub computation.
+
+
+// The largest bitset subsumed by this type.
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(int32_t value) {
- if (value >= 0x40000000) {
- return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
+ DisallowHeapAllocation no_allocation;
+ if (type->IsBitset()) {
+ return type->AsBitset();
+ } else if (type->IsUnion()) {
+ SLOW_DCHECK(type->AsUnion()->Wellformed());
+ return type->AsUnion()->Get(0)->BitsetGlb(); // Shortcut.
+ // (The remaining BitsetGlb's are None anyway).
+ } else {
+ return kNone;
}
- if (value >= 0) return kUnsignedSmall;
- if (value >= -0x40000000) return kOtherSignedSmall;
- return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
}
+// The smallest bitset subsuming this type.
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(uint32_t value) {
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
DisallowHeapAllocation no_allocation;
- if (value >= 0x80000000u) return kOtherUnsigned32;
- if (value >= 0x40000000u) {
- return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+ if (type->IsBitset()) return type->AsBitset();
+ if (type->IsUnion()) {
+ int bitset = kNone;
+ for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
+ bitset |= type->AsUnion()->Get(i)->BitsetLub();
+ }
+ return bitset;
}
- return kUnsignedSmall;
+ if (type->IsClass()) {
+ // Little hack to avoid the need for a region for handlification here...
+ return Config::is_class(type) ? Lub(*Config::as_class(type)) :
+ type->AsClass()->Bound(NULL)->AsBitset();
+ }
+ if (type->IsConstant()) return type->AsConstant()->Bound()->AsBitset();
+ if (type->IsRange()) return type->AsRange()->BitsetLub();
+ if (type->IsContext()) return kInternal & kTaggedPtr;
+ if (type->IsArray()) return kArray;
+ if (type->IsFunction()) return kFunction;
+ UNREACHABLE();
+ return kNone;
}
template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
DisallowHeapAllocation no_allocation;
switch (map->instance_type()) {
case STRING_TYPE:
- case ASCII_STRING_TYPE:
+ case ONE_BYTE_STRING_TYPE:
case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE:
+ case CONS_ONE_BYTE_STRING_TYPE:
case SLICED_STRING_TYPE:
- case SLICED_ASCII_STRING_TYPE:
+ case SLICED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kOtherString;
case INTERNALIZED_STRING_TYPE:
- case ASCII_INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kString;
+ return kInternalizedString;
case SYMBOL_TYPE:
return kSymbol;
case ODDBALL_TYPE: {
@@ -261,81 +257,214 @@ int TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
}
-// -----------------------------------------------------------------------------
-// Predicates.
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
+ if (value->IsNumber()) {
+ return Lub(value->Number()) & (value->IsSmi() ? kTaggedInt : kTaggedPtr);
+ }
+ return Lub(i::HeapObject::cast(value)->map());
+}
+
-// Check this <= that.
template<class Config>
-bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(double value) {
DisallowHeapAllocation no_allocation;
+ if (i::IsMinusZero(value)) return kMinusZero;
+ if (std::isnan(value)) return kNaN;
+ if (IsUint32Double(value)) return Lub(FastD2UI(value));
+ if (IsInt32Double(value)) return Lub(FastD2I(value));
+ return kOtherNumber;
+}
- // Fast path for bitsets.
- if (this->IsNone()) return true;
- if (that->IsBitset()) {
- return BitsetType::Is(BitsetType::Lub(this), that->AsBitset());
+
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(int32_t value) {
+ DisallowHeapAllocation no_allocation;
+ if (value >= 0x40000000) {
+ return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
}
+ if (value >= 0) return kUnsignedSmall;
+ if (value >= -0x40000000) return kOtherSignedSmall;
+ return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
+}
+
- if (that->IsClass()) {
- return this->IsClass()
- && *this->AsClass()->Map() == *that->AsClass()->Map()
- && ((Config::is_class(that) && Config::is_class(this)) ||
- BitsetType::New(this->BitsetLub())->Is(
- BitsetType::New(that->BitsetLub())));
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(uint32_t value) {
+ DisallowHeapAllocation no_allocation;
+ if (value >= 0x80000000u) return kOtherUnsigned32;
+ if (value >= 0x40000000u) {
+ return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
}
- if (that->IsConstant()) {
- return this->IsConstant()
- && *this->AsConstant()->Value() == *that->AsConstant()->Value()
- && this->AsConstant()->Bound()->Is(that->AsConstant()->Bound());
+ return kUnsignedSmall;
+}
+
+
+// Minimum values of regular numeric bitsets when SmiValuesAre31Bits.
+template<class Config>
+const typename TypeImpl<Config>::BitsetType::BitsetMin
+TypeImpl<Config>::BitsetType::BitsetMins31[] = {
+ {kOtherNumber, -V8_INFINITY},
+ {kOtherSigned32, kMinInt},
+ {kOtherSignedSmall, -0x40000000},
+ {kUnsignedSmall, 0},
+ {kOtherUnsigned31, 0x40000000},
+ {kOtherUnsigned32, 0x80000000},
+ {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}
+};
+
+
+// Minimum values of regular numeric bitsets when SmiValuesAre32Bits.
+// OtherSigned32 and OtherUnsigned31 are empty (see the diagrams in types.h).
+template<class Config>
+const typename TypeImpl<Config>::BitsetType::BitsetMin
+TypeImpl<Config>::BitsetType::BitsetMins32[] = {
+ {kOtherNumber, -V8_INFINITY},
+ {kOtherSignedSmall, kMinInt},
+ {kUnsignedSmall, 0},
+ {kOtherUnsigned32, 0x80000000},
+ {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}
+};
+
+
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(Limits lim) {
+ DisallowHeapAllocation no_allocation;
+ double min = lim.min->Number();
+ double max = lim.max->Number();
+ int lub = kNone;
+ const BitsetMin* mins = BitsetMins();
+
+ for (size_t i = 1; i < BitsetMinsSize(); ++i) {
+ if (min < mins[i].min) {
+ lub |= mins[i-1].bits;
+ if (max < mins[i].min) return lub;
+ }
}
- if (that->IsRange()) {
- return this->IsRange()
- && this->AsRange()->Bound()->Is(that->AsRange()->Bound())
- && dle(that->AsRange()->Min(), this->AsRange()->Min())
- && dle(this->AsRange()->Max(), that->AsRange()->Max());
+ return lub |= mins[BitsetMinsSize()-1].bits;
+}
+
+
+template<class Config>
+double TypeImpl<Config>::BitsetType::Min(bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ DCHECK(Is(bits, kNumber));
+ const BitsetMin* mins = BitsetMins();
+ bool mz = SEMANTIC(bits & kMinusZero);
+ for (size_t i = 0; i < BitsetMinsSize(); ++i) {
+ if (Is(SEMANTIC(mins[i].bits), bits)) {
+ return mz ? std::min(0.0, mins[i].min) : mins[i].min;
+ }
+ }
+ if (mz) return 0;
+ return base::OS::nan_value();
+}
+
+
+template<class Config>
+double TypeImpl<Config>::BitsetType::Max(bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ DCHECK(Is(bits, kNumber));
+ const BitsetMin* mins = BitsetMins();
+ bool mz = bits & kMinusZero;
+ if (BitsetType::Is(mins[BitsetMinsSize()-1].bits, bits)) {
+ return +V8_INFINITY;
+ }
+ for (size_t i = BitsetMinsSize()-1; i-- > 0; ) {
+ if (Is(SEMANTIC(mins[i].bits), bits)) {
+ return mz ?
+ std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
+ }
+ }
+ if (mz) return 0;
+ return base::OS::nan_value();
+}
+
+
+// -----------------------------------------------------------------------------
+// Predicates.
+
+
+template<class Config>
+bool TypeImpl<Config>::SimplyEquals(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsClass()) {
+ return that->IsClass()
+ && *this->AsClass()->Map() == *that->AsClass()->Map();
+ }
+ if (this->IsConstant()) {
+ return that->IsConstant()
+ && *this->AsConstant()->Value() == *that->AsConstant()->Value();
}
- if (that->IsContext()) {
- return this->IsContext()
+ if (this->IsContext()) {
+ return that->IsContext()
&& this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
}
- if (that->IsArray()) {
- return this->IsArray()
+ if (this->IsArray()) {
+ return that->IsArray()
&& this->AsArray()->Element()->Equals(that->AsArray()->Element());
}
- if (that->IsFunction()) {
- // We currently do not allow for any variance here, in order to keep
- // Union and Intersect operations simple.
- if (!this->IsFunction()) return false;
+ if (this->IsFunction()) {
+ if (!that->IsFunction()) return false;
FunctionType* this_fun = this->AsFunction();
FunctionType* that_fun = that->AsFunction();
if (this_fun->Arity() != that_fun->Arity() ||
!this_fun->Result()->Equals(that_fun->Result()) ||
- !that_fun->Receiver()->Equals(this_fun->Receiver())) {
+ !this_fun->Receiver()->Equals(that_fun->Receiver())) {
return false;
}
- for (int i = 0; i < this_fun->Arity(); ++i) {
- if (!that_fun->Parameter(i)->Equals(this_fun->Parameter(i))) return false;
+ for (int i = 0, n = this_fun->Arity(); i < n; ++i) {
+ if (!this_fun->Parameter(i)->Equals(that_fun->Parameter(i))) return false;
}
return true;
}
+ UNREACHABLE();
+ return false;
+}
+
+
+// Check if [this] <= [that].
+template<class Config>
+bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
+
+ if (that->IsBitset()) {
+ return BitsetType::Is(this->BitsetLub(), that->AsBitset());
+ }
+ if (this->IsBitset()) {
+ return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
+ }
- // (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
+ // (T1 \/ ... \/ Tn) <= T if (T1 <= T) /\ ... /\ (Tn <= T)
if (this->IsUnion()) {
- UnionHandle unioned = handle(this->AsUnion());
- for (int i = 0; i < unioned->Length(); ++i) {
- if (!unioned->Get(i)->Is(that)) return false;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (!this->AsUnion()->Get(i)->Is(that)) return false;
}
return true;
}
- // T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn)
- // (iff T is not a union)
- DCHECK(!this->IsUnion() && that->IsUnion());
- UnionHandle unioned = handle(that->AsUnion());
- for (int i = 0; i < unioned->Length(); ++i) {
- if (this->Is(unioned->Get(i))) return true;
- if (this->IsBitset()) break; // Fast fail, only first field is a bitset.
+ // T <= (T1 \/ ... \/ Tn) if (T <= T1) \/ ... \/ (T <= Tn)
+ if (that->IsUnion()) {
+ for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
+ if (this->Is(that->AsUnion()->Get(i))) return true;
+ if (i > 1 && this->IsRange()) return false; // Shortcut.
+ }
+ return false;
+ }
+
+ if (that->IsRange()) {
+ return (this->IsRange() && Contains(that->AsRange(), this->AsRange()))
+ || (this->IsConstant() &&
+ Contains(that->AsRange(), *this->AsConstant()->Value()));
}
- return false;
+ if (this->IsRange()) return false;
+ return this->SimplyEquals(that);
}
@@ -359,7 +488,7 @@ bool TypeImpl<Config>::NowIs(TypeImpl* that) {
}
-// Check if this contains only (currently) stable classes.
+// Check if [this] contains only (currently) stable classes.
template<class Config>
bool TypeImpl<Config>::NowStable() {
DisallowHeapAllocation no_allocation;
@@ -370,82 +499,93 @@ bool TypeImpl<Config>::NowStable() {
}
-// Check this overlaps that.
+// Check if [this] and [that] overlap.
template<class Config>
bool TypeImpl<Config>::Maybe(TypeImpl* that) {
DisallowHeapAllocation no_allocation;
- // (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
+ // (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
if (this->IsUnion()) {
- UnionHandle unioned = handle(this->AsUnion());
- for (int i = 0; i < unioned->Length(); ++i) {
- if (unioned->Get(i)->Maybe(that)) return true;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (this->AsUnion()->Get(i)->Maybe(that)) return true;
}
return false;
}
- // T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... \/ (T overlaps Tn)
+ // T overlaps (T1 \/ ... \/ Tn) if (T overlaps T1) \/ ... \/ (T overlaps Tn)
if (that->IsUnion()) {
- UnionHandle unioned = handle(that->AsUnion());
- for (int i = 0; i < unioned->Length(); ++i) {
- if (this->Maybe(unioned->Get(i))) return true;
+ for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
+ if (this->Maybe(that->AsUnion()->Get(i))) return true;
}
return false;
}
- DCHECK(!this->IsUnion() && !that->IsUnion());
- if (this->IsBitset() || that->IsBitset()) {
- return BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub());
- }
- if (this->IsClass()) {
- return that->IsClass()
- && *this->AsClass()->Map() == *that->AsClass()->Map();
- }
- if (this->IsConstant()) {
- return that->IsConstant()
- && *this->AsConstant()->Value() == *that->AsConstant()->Value();
- }
- if (this->IsContext()) {
- return this->Equals(that);
- }
- if (this->IsArray()) {
- // There is no variance!
- return this->Equals(that);
+ if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+ return false;
+ if (this->IsBitset() || that->IsBitset()) return true;
+
+ if (this->IsClass() != that->IsClass()) return true;
+
+ if (this->IsRange()) {
+ if (that->IsConstant()) {
+ return Contains(this->AsRange(), *that->AsConstant()->Value());
+ }
+ return that->IsRange() && Overlap(this->AsRange(), that->AsRange());
}
- if (this->IsFunction()) {
- // There is no variance!
- return this->Equals(that);
+ if (that->IsRange()) {
+ if (this->IsConstant()) {
+ return Contains(that->AsRange(), *this->AsConstant()->Value());
+ }
+ return this->IsRange() && Overlap(this->AsRange(), that->AsRange());
}
- return false;
+ return this->SimplyEquals(that);
}
-// Check if value is contained in (inhabits) type.
+// Return the range in [this], or [NULL].
template<class Config>
-bool TypeImpl<Config>::Contains(i::Object* value) {
+typename TypeImpl<Config>::RangeType* TypeImpl<Config>::GetRange() {
DisallowHeapAllocation no_allocation;
- if (this->IsRange()) {
- return value->IsNumber() &&
- dle(this->AsRange()->Min(), value->Number()) &&
- dle(value->Number(), this->AsRange()->Max()) &&
- BitsetType::Is(BitsetType::Lub(value), this->BitsetLub());
+ if (this->IsRange()) return this->AsRange();
+ if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
+ return this->AsUnion()->Get(1)->AsRange();
}
+ return NULL;
+}
+
+
+template<class Config>
+bool TypeImpl<Config>::Contains(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
if (*it.Current() == value) return true;
}
+ if (IsInteger(value)) {
+ RangeType* range = this->GetRange();
+ if (range != NULL && Contains(range, value)) return true;
+ }
return BitsetType::New(BitsetType::Lub(value))->Is(this);
}
template<class Config>
bool TypeImpl<Config>::UnionType::Wellformed() {
- DCHECK(this->Length() >= 2);
+ DisallowHeapAllocation no_allocation;
+ // This checks the invariants of the union representation:
+ // 1. There are at least two elements.
+ // 2. At most one element is a bitset, and it must be the first one.
+ // 3. At most one element is a range, and it must be the second one
+ // (even when the first element is not a bitset).
+ // 4. No element is itself a union.
+ // 5. No element is a subtype of any other.
+ DCHECK(this->Length() >= 2); // (1)
for (int i = 0; i < this->Length(); ++i) {
- DCHECK(!this->Get(i)->IsUnion());
- if (i > 0) DCHECK(!this->Get(i)->IsBitset());
+ if (i != 0) DCHECK(!this->Get(i)->IsBitset()); // (2)
+ if (i != 1) DCHECK(!this->Get(i)->IsRange()); // (3)
+ DCHECK(!this->Get(i)->IsUnion()); // (4)
for (int j = 0; j < this->Length(); ++j) {
- if (i != j) DCHECK(!this->Get(i)->Is(this->Get(j)));
+ if (i != j) DCHECK(!this->Get(i)->Is(this->Get(j))); // (5)
}
}
return true;
@@ -455,228 +595,231 @@ bool TypeImpl<Config>::UnionType::Wellformed() {
// -----------------------------------------------------------------------------
// Union and intersection
+
+static bool AddIsSafe(int x, int y) {
+ return x >= 0 ?
+ y <= std::numeric_limits<int>::max() - x :
+ y >= std::numeric_limits<int>::min() - x;
+}
+
+
template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Rebound(
- int bitset, Region* region) {
- TypeHandle bound = BitsetType::New(bitset, region);
- if (this->IsClass()) {
- return ClassType::New(this->AsClass()->Map(), bound, region);
- } else if (this->IsConstant()) {
- return ConstantType::New(this->AsConstant()->Value(), bound, region);
- } else if (this->IsRange()) {
- return RangeType::New(
- this->AsRange()->Min(), this->AsRange()->Max(), bound, region);
- } else if (this->IsContext()) {
- return ContextType::New(this->AsContext()->Outer(), bound, region);
- } else if (this->IsArray()) {
- return ArrayType::New(this->AsArray()->Element(), bound, region);
- } else if (this->IsFunction()) {
- FunctionHandle function = Config::handle(this->AsFunction());
- int arity = function->Arity();
- FunctionHandle type = FunctionType::New(
- function->Result(), function->Receiver(), bound, arity, region);
- for (int i = 0; i < arity; ++i) {
- type->InitParameter(i, function->Parameter(i));
- }
- return type;
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
+ TypeHandle type1, TypeHandle type2, Region* region) {
+ bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
+ if (!BitsetType::IsInhabited(bits)) bits = BitsetType::kNone;
+
+ // Fast case: bit sets.
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return BitsetType::New(bits, region);
}
- UNREACHABLE();
- return TypeHandle();
+
+ // Fast case: top or bottom types.
+ if (type1->IsNone() || type2->IsAny()) return type1; // Shortcut.
+ if (type2->IsNone() || type1->IsAny()) return type2; // Shortcut.
+
+ // Semi-fast case.
+ if (type1->Is(type2)) return type1;
+ if (type2->Is(type1)) return type2;
+
+ // Slow case: create union.
+ int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
+ int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
+ if (!AddIsSafe(size1, size2)) return Any(region);
+ int size = size1 + size2;
+ if (!AddIsSafe(size, 2)) return Any(region);
+ size += 2;
+ UnionHandle result = UnionType::New(size, region);
+ size = 0;
+
+ // Deal with bitsets.
+ result->Set(size++, BitsetType::New(bits, region));
+
+ // Deal with ranges.
+ TypeHandle range = None(region);
+ RangeType* range1 = type1->GetRange();
+ RangeType* range2 = type2->GetRange();
+ if (range1 != NULL && range2 != NULL) {
+ Limits lim = Intersect(Limits(range1), Limits(range2));
+ if (lim.min->Number() <= lim.max->Number()) {
+ range = RangeType::New(lim, region);
+ }
+ }
+ result->Set(size++, range);
+
+ size = IntersectAux(type1, type2, result, size, region);
+ return NormalizeUnion(result, size);
}
template<class Config>
-int TypeImpl<Config>::BoundBy(TypeImpl* that) {
- DCHECK(!this->IsUnion());
- if (that->IsUnion()) {
- UnionType* unioned = that->AsUnion();
- int length = unioned->Length();
- int bitset = BitsetType::kNone;
- for (int i = 0; i < length; ++i) {
- bitset |= BoundBy(unioned->Get(i)->unhandle());
- }
- return bitset;
- } else if (that->IsClass() && this->IsClass() &&
- *this->AsClass()->Map() == *that->AsClass()->Map()) {
- return that->BitsetLub();
- } else if (that->IsConstant() && this->IsConstant() &&
- *this->AsConstant()->Value() == *that->AsConstant()->Value()) {
- return that->AsConstant()->Bound()->AsBitset();
- } else if (that->IsContext() && this->IsContext() && this->Is(that)) {
- return that->AsContext()->Bound()->AsBitset();
- } else if (that->IsArray() && this->IsArray() && this->Is(that)) {
- return that->AsArray()->Bound()->AsBitset();
- } else if (that->IsFunction() && this->IsFunction() && this->Is(that)) {
- return that->AsFunction()->Bound()->AsBitset();
- }
- return that->BitsetGlb();
-}
-
-
-template<class Config>
-int TypeImpl<Config>::IndexInUnion(
- int bound, UnionHandle unioned, int current_size) {
- DCHECK(!this->IsUnion());
- for (int i = 0; i < current_size; ++i) {
- TypeHandle that = unioned->Get(i);
- if (that->IsBitset()) {
- if (BitsetType::Is(bound, that->AsBitset())) return i;
- } else if (that->IsClass() && this->IsClass()) {
- if (*this->AsClass()->Map() == *that->AsClass()->Map()) return i;
- } else if (that->IsConstant() && this->IsConstant()) {
- if (*this->AsConstant()->Value() == *that->AsConstant()->Value())
- return i;
- } else if (that->IsContext() && this->IsContext()) {
- if (this->Is(that)) return i;
- } else if (that->IsArray() && this->IsArray()) {
- if (this->Is(that)) return i;
- } else if (that->IsFunction() && this->IsFunction()) {
- if (this->Is(that)) return i;
- }
- }
- return -1;
-}
-
-
-// Get non-bitsets from type, bounded by upper.
-// Store at result starting at index. Returns updated index.
-template<class Config>
-int TypeImpl<Config>::ExtendUnion(
- UnionHandle result, int size, TypeHandle type,
- TypeHandle other, bool is_intersect, Region* region) {
- if (type->IsUnion()) {
- UnionHandle unioned = handle(type->AsUnion());
- for (int i = 0; i < unioned->Length(); ++i) {
- TypeHandle type_i = unioned->Get(i);
- DCHECK(i == 0 || !(type_i->IsBitset() || type_i->Is(unioned->Get(0))));
- if (!type_i->IsBitset()) {
- size = ExtendUnion(result, size, type_i, other, is_intersect, region);
- }
- }
- } else if (!type->IsBitset()) {
- DCHECK(type->IsClass() || type->IsConstant() || type->IsRange() ||
- type->IsContext() || type->IsArray() || type->IsFunction());
- int inherent_bound = type->InherentBitsetLub();
- int old_bound = type->BitsetLub();
- int other_bound = type->BoundBy(other->unhandle()) & inherent_bound;
- int new_bound =
- is_intersect ? (old_bound & other_bound) : (old_bound | other_bound);
- if (new_bound != BitsetType::kNone) {
- int i = type->IndexInUnion(new_bound, result, size);
- if (i == -1) {
- i = size++;
- } else if (result->Get(i)->IsBitset()) {
- return size; // Already fully subsumed.
- } else {
- int type_i_bound = result->Get(i)->BitsetLub();
- new_bound |= type_i_bound;
- if (new_bound == type_i_bound) return size;
- }
- if (new_bound != old_bound) type = type->Rebound(new_bound, region);
- result->Set(i, type);
+int TypeImpl<Config>::UpdateRange(
+ RangeHandle range, UnionHandle result, int size, Region* region) {
+ TypeHandle old_range = result->Get(1);
+ DCHECK(old_range->IsRange() || old_range->IsNone());
+ if (range->Is(old_range)) return size;
+ if (!old_range->Is(range->unhandle())) {
+ range = RangeType::New(
+ Union(Limits(range->AsRange()), Limits(old_range->AsRange())), region);
+ }
+ result->Set(1, range);
+
+ // Remove any components that just got subsumed.
+ for (int i = 2; i < size; ) {
+ if (result->Get(i)->Is(range->unhandle())) {
+ result->Set(i, result->Get(--size));
+ } else {
+ ++i;
}
}
return size;
}
-// Union is O(1) on simple bitsets, but O(n*m) on structured unions.
template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
- TypeHandle type1, TypeHandle type2, Region* region) {
- // Fast case: bit sets.
- if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
+int TypeImpl<Config>::IntersectAux(
+ TypeHandle lhs, TypeHandle rhs,
+ UnionHandle result, int size, Region* region) {
+ if (lhs->IsUnion()) {
+ for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
+ size = IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, region);
+ }
+ return size;
+ }
+ if (rhs->IsUnion()) {
+ for (int i = 0, n = rhs->AsUnion()->Length(); i < n; ++i) {
+ size = IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, region);
+ }
+ return size;
}
- // Fast case: top or bottom types.
- if (type1->IsAny() || type2->IsNone()) return type1;
- if (type2->IsAny() || type1->IsNone()) return type2;
-
- // Semi-fast case: Unioned objects are neither involved nor produced.
- if (!(type1->IsUnion() || type2->IsUnion())) {
- if (type1->Is(type2)) return type2;
- if (type2->Is(type1)) return type1;
+ if (!BitsetType::IsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+ return size;
}
- // Slow case: may need to produce a Unioned object.
- int size = 0;
- if (!type1->IsBitset()) {
- size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1);
+ if (lhs->IsRange()) {
+ if (rhs->IsBitset() || rhs->IsClass()) {
+ return UpdateRange(
+ Config::template cast<RangeType>(lhs), result, size, region);
+ }
+ if (rhs->IsConstant() &&
+ Contains(lhs->AsRange(), *rhs->AsConstant()->Value())) {
+ return AddToUnion(rhs, result, size, region);
+ }
+ return size;
}
- if (!type2->IsBitset()) {
- size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1);
+ if (rhs->IsRange()) {
+ if (lhs->IsBitset() || lhs->IsClass()) {
+ return UpdateRange(
+ Config::template cast<RangeType>(rhs), result, size, region);
+ }
+ if (lhs->IsConstant() &&
+ Contains(rhs->AsRange(), *lhs->AsConstant()->Value())) {
+ return AddToUnion(lhs, result, size, region);
+ }
+ return size;
}
- int bitset = type1->BitsetGlb() | type2->BitsetGlb();
- if (bitset != BitsetType::kNone) ++size;
- DCHECK(size >= 1);
- UnionHandle unioned = UnionType::New(size, region);
- size = 0;
- if (bitset != BitsetType::kNone) {
- unioned->Set(size++, BitsetType::New(bitset, region));
+ if (lhs->IsBitset() || rhs->IsBitset()) {
+ return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, region);
}
- size = ExtendUnion(unioned, size, type1, type2, false, region);
- size = ExtendUnion(unioned, size, type2, type1, false, region);
-
- if (size == 1) {
- return unioned->Get(0);
- } else {
- unioned->Shrink(size);
- DCHECK(unioned->Wellformed());
- return unioned;
+ if (lhs->IsClass() != rhs->IsClass()) {
+ return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, region);
+ }
+ if (lhs->SimplyEquals(rhs->unhandle())) {
+ return AddToUnion(lhs, result, size, region);
}
+ return size;
}
-// Intersection is O(1) on simple bitsets, but O(n*m) on structured unions.
template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
TypeHandle type1, TypeHandle type2, Region* region) {
+
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region);
+ return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
}
// Fast case: top or bottom types.
- if (type1->IsNone() || type2->IsAny()) return type1;
- if (type2->IsNone() || type1->IsAny()) return type2;
+ if (type1->IsAny() || type2->IsNone()) return type1;
+ if (type2->IsAny() || type1->IsNone()) return type2;
+
+ // Semi-fast case.
+ if (type1->Is(type2)) return type2;
+ if (type2->Is(type1)) return type1;
+
+ // Slow case: create union.
+ int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
+ int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
+ if (!AddIsSafe(size1, size2)) return Any(region);
+ int size = size1 + size2;
+ if (!AddIsSafe(size, 2)) return Any(region);
+ size += 2;
+ UnionHandle result = UnionType::New(size, region);
+ size = 0;
- // Semi-fast case: Unioned objects are neither involved nor produced.
- if (!(type1->IsUnion() || type2->IsUnion())) {
- if (type1->Is(type2)) return type1;
- if (type2->Is(type1)) return type2;
+ // Deal with bitsets.
+ TypeHandle bits = BitsetType::New(
+ type1->BitsetGlb() | type2->BitsetGlb(), region);
+ result->Set(size++, bits);
+
+ // Deal with ranges.
+ TypeHandle range = None(region);
+ RangeType* range1 = type1->GetRange();
+ RangeType* range2 = type2->GetRange();
+ if (range1 != NULL && range2 != NULL) {
+ range = RangeType::New(Union(Limits(range1), Limits(range2)), region);
+ } else if (range1 != NULL) {
+ range = handle(range1);
+ } else if (range2 != NULL) {
+ range = handle(range2);
}
+ result->Set(size++, range);
+
+ size = AddToUnion(type1, result, size, region);
+ size = AddToUnion(type2, result, size, region);
+ return NormalizeUnion(result, size);
+}
- // Slow case: may need to produce a Unioned object.
- int size = 0;
- if (!type1->IsBitset()) {
- size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1);
+
+// Add [type] to [result] unless [type] is bitset, range, or already subsumed.
+// Return new size of [result].
+template<class Config>
+int TypeImpl<Config>::AddToUnion(
+ TypeHandle type, UnionHandle result, int size, Region* region) {
+ if (type->IsBitset() || type->IsRange()) return size;
+ if (type->IsUnion()) {
+ for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
+ size = AddToUnion(type->AsUnion()->Get(i), result, size, region);
+ }
+ return size;
}
- if (!type2->IsBitset()) {
- size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1);
+ for (int i = 0; i < size; ++i) {
+ if (type->Is(result->Get(i))) return size;
}
- int bitset = type1->BitsetGlb() & type2->BitsetGlb();
- if (bitset != BitsetType::kNone) ++size;
- DCHECK(size >= 1);
+ result->Set(size++, type);
+ return size;
+}
- UnionHandle unioned = UnionType::New(size, region);
- size = 0;
- if (bitset != BitsetType::kNone) {
- unioned->Set(size++, BitsetType::New(bitset, region));
- }
- size = ExtendUnion(unioned, size, type1, type2, true, region);
- size = ExtendUnion(unioned, size, type2, type1, true, region);
- if (size == 0) {
- return None(region);
- } else if (size == 1) {
- return unioned->Get(0);
- } else {
- unioned->Shrink(size);
- DCHECK(unioned->Wellformed());
- return unioned;
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
+ UnionHandle unioned, int size) {
+ DCHECK(size >= 2);
+ // If range is subsumed by bitset, use its place for a different type.
+ if (unioned->Get(1)->Is(unioned->Get(0))) {
+ unioned->Set(1, unioned->Get(--size));
}
+ // If bitset is None, use its place for a different type.
+ if (size >= 2 && unioned->Get(0)->IsNone()) {
+ unioned->Set(0, unioned->Get(--size));
+ }
+ if (size == 1) return unioned->Get(0);
+ unioned->Shrink(size);
+ SLOW_DCHECK(unioned->Wellformed());
+ return unioned;
}
@@ -689,10 +832,9 @@ int TypeImpl<Config>::NumClasses() {
if (this->IsClass()) {
return 1;
} else if (this->IsUnion()) {
- UnionHandle unioned = handle(this->AsUnion());
int result = 0;
- for (int i = 0; i < unioned->Length(); ++i) {
- if (unioned->Get(i)->IsClass()) ++result;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (this->AsUnion()->Get(i)->IsClass()) ++result;
}
return result;
} else {
@@ -707,10 +849,9 @@ int TypeImpl<Config>::NumConstants() {
if (this->IsConstant()) {
return 1;
} else if (this->IsUnion()) {
- UnionHandle unioned = handle(this->AsUnion());
int result = 0;
- for (int i = 0; i < unioned->Length(); ++i) {
- if (unioned->Get(i)->IsConstant()) ++result;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (this->AsUnion()->Get(i)->IsConstant()) ++result;
}
return result;
} else {
@@ -772,9 +913,8 @@ void TypeImpl<Config>::Iterator<T>::Advance() {
DisallowHeapAllocation no_allocation;
++index_;
if (type_->IsUnion()) {
- UnionHandle unioned = handle(type_->AsUnion());
- for (; index_ < unioned->Length(); ++index_) {
- if (matches(unioned->Get(index_))) return;
+ for (int n = type_->AsUnion()->Length(); index_ < n; ++index_) {
+ if (matches(type_->AsUnion()->Get(index_))) return;
}
} else if (index_ == 0 && matches(type_)) {
return;
@@ -793,19 +933,15 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
if (type->IsBitset()) {
return BitsetType::New(type->AsBitset(), region);
} else if (type->IsClass()) {
- TypeHandle bound = BitsetType::New(type->BitsetLub(), region);
- return ClassType::New(type->AsClass()->Map(), bound, region);
+ return ClassType::New(type->AsClass()->Map(), region);
} else if (type->IsConstant()) {
- TypeHandle bound = Convert<OtherType>(type->AsConstant()->Bound(), region);
- return ConstantType::New(type->AsConstant()->Value(), bound, region);
+ return ConstantType::New(type->AsConstant()->Value(), region);
} else if (type->IsRange()) {
- TypeHandle bound = Convert<OtherType>(type->AsRange()->Bound(), region);
return RangeType::New(
- type->AsRange()->Min(), type->AsRange()->Max(), bound, region);
+ type->AsRange()->Min(), type->AsRange()->Max(), region);
} else if (type->IsContext()) {
- TypeHandle bound = Convert<OtherType>(type->AsContext()->Bound(), region);
TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), region);
- return ContextType::New(outer, bound, region);
+ return ContextType::New(outer, region);
} else if (type->IsUnion()) {
int length = type->AsUnion()->Length();
UnionHandle unioned = UnionType::New(length, region);
@@ -816,14 +952,12 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
return unioned;
} else if (type->IsArray()) {
TypeHandle element = Convert<OtherType>(type->AsArray()->Element(), region);
- TypeHandle bound = Convert<OtherType>(type->AsArray()->Bound(), region);
- return ArrayType::New(element, bound, region);
+ return ArrayType::New(element, region);
} else if (type->IsFunction()) {
TypeHandle res = Convert<OtherType>(type->AsFunction()->Result(), region);
TypeHandle rcv = Convert<OtherType>(type->AsFunction()->Receiver(), region);
- TypeHandle bound = Convert<OtherType>(type->AsFunction()->Bound(), region);
FunctionHandle function = FunctionType::New(
- res, rcv, bound, type->AsFunction()->Arity(), region);
+ res, rcv, type->AsFunction()->Arity(), region);
for (int i = 0; i < function->Arity(); ++i) {
TypeHandle param = Convert<OtherType>(
type->AsFunction()->Parameter(i), region);
@@ -841,8 +975,8 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
// Printing.
template<class Config>
-const char* TypeImpl<Config>::BitsetType::Name(int bitset) {
- switch (bitset) {
+const char* TypeImpl<Config>::BitsetType::Name(bitset bits) {
+ switch (bits) {
case REPRESENTATION(kAny): return "Any";
#define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
case REPRESENTATION(k##type): return #type;
@@ -862,15 +996,15 @@ const char* TypeImpl<Config>::BitsetType::Name(int bitset) {
template <class Config>
void TypeImpl<Config>::BitsetType::Print(OStream& os, // NOLINT
- int bitset) {
+ bitset bits) {
DisallowHeapAllocation no_allocation;
- const char* name = Name(bitset);
+ const char* name = Name(bits);
if (name != NULL) {
os << name;
return;
}
- static const int named_bitsets[] = {
+ static const bitset named_bitsets[] = {
#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
#undef BITSET_CONSTANT
@@ -882,16 +1016,16 @@ void TypeImpl<Config>::BitsetType::Print(OStream& os, // NOLINT
bool is_first = true;
os << "(";
- for (int i(ARRAY_SIZE(named_bitsets) - 1); bitset != 0 && i >= 0; --i) {
- int subset = named_bitsets[i];
- if ((bitset & subset) == subset) {
+ for (int i(arraysize(named_bitsets) - 1); bits != 0 && i >= 0; --i) {
+ bitset subset = named_bitsets[i];
+ if ((bits & subset) == subset) {
if (!is_first) os << " | ";
is_first = false;
os << Name(subset);
- bitset -= subset;
+ bits -= subset;
}
}
- DCHECK(bitset == 0);
+ DCHECK(bits == 0);
os << ")";
}
@@ -907,24 +1041,18 @@ void TypeImpl<Config>::PrintTo(OStream& os, PrintDimension dim) { // NOLINT
BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
os << ")";
} else if (this->IsConstant()) {
- os << "Constant(" << static_cast<void*>(*this->AsConstant()->Value())
- << " : ";
- BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
- os << ")";
+ os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
} else if (this->IsRange()) {
- os << "Range(" << this->AsRange()->Min()
- << ".." << this->AsRange()->Max() << " : ";
- BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
- os << ")";
+ os << "Range(" << this->AsRange()->Min()->Number()
+ << ", " << this->AsRange()->Max()->Number() << ")";
} else if (this->IsContext()) {
os << "Context(";
this->AsContext()->Outer()->PrintTo(os, dim);
os << ")";
} else if (this->IsUnion()) {
os << "(";
- UnionHandle unioned = handle(this->AsUnion());
- for (int i = 0; i < unioned->Length(); ++i) {
- TypeHandle type_i = unioned->Get(i);
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ TypeHandle type_i = this->AsUnion()->Get(i);
if (i > 0) os << " | ";
type_i->PrintTo(os, dim);
}
@@ -963,6 +1091,12 @@ void TypeImpl<Config>::Print() {
PrintTo(os);
os << endl;
}
+template <class Config>
+void TypeImpl<Config>::BitsetType::Print(bitset bits) {
+ OFStream os(stdout);
+ Print(os, bits);
+ os << endl;
+}
#endif
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index cca8b3167b..e7815ed316 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -5,6 +5,7 @@
#ifndef V8_TYPES_H_
#define V8_TYPES_H_
+#include "src/conversions.h"
#include "src/factory.h"
#include "src/handles.h"
#include "src/ostreams.h"
@@ -23,6 +24,7 @@ namespace internal {
// Types consist of two dimensions: semantic (value range) and representation.
// Both are related through subtyping.
//
+//
// SEMANTIC DIMENSION
//
// The following equations and inequations hold for the semantic axis:
@@ -61,6 +63,7 @@ namespace internal {
// However, we also define a 'temporal' variant of the subtyping relation that
// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
//
+//
// REPRESENTATIONAL DIMENSION
//
// For the representation axis, the following holds:
@@ -88,6 +91,16 @@ namespace internal {
// SignedSmall /\ TaggedInt (a 'smi')
// Number /\ TaggedPtr (a heap number)
//
+//
+// RANGE TYPES
+//
+// A range type represents a continuous integer interval by its minimum and
+// maximum value. Either value might be an infinity.
+//
+// Constant(v) is considered a subtype of Range(x..y) if v happens to be an
+// integer between x and y.
+//
+//
// PREDICATES
//
// There are two main functions for testing types:
@@ -109,21 +122,23 @@ namespace internal {
// Any compilation decision based on such temporary properties requires runtime
// guarding!
//
+//
// PROPERTIES
//
// Various formal properties hold for constructors, operators, and predicates
-// over types. For example, constructors are injective, subtyping is a complete
-// partial order, union and intersection satisfy the usual algebraic properties.
+// over types. For example, constructors are injective and subtyping is a
+// complete partial order.
//
// See test/cctest/test-types.cc for a comprehensive executable specification,
// especially with respect to the properties of the more exotic 'temporal'
// constructors and predicates (those prefixed 'Now').
//
+//
// IMPLEMENTATION
//
// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets. Class is a heap pointer to the respective map. Only Constant's, or
-// unions containing Class'es or Constant's, currently require allocation.
+// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
+// respective map. Only structured types require allocation.
// Note that the bitset representation is closed under both Union and Intersect.
//
// There are two type representations, using different allocation:
@@ -139,24 +154,23 @@ namespace internal {
// Values for bitset types
#define MASK_BITSET_TYPE_LIST(V) \
- V(Representation, static_cast<int>(0xffc00000)) \
- V(Semantic, static_cast<int>(0x003fffff))
+ V(Representation, 0xff800000u) \
+ V(Semantic, 0x007ffffeu)
#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
#define SEMANTIC(k) ((k) & BitsetType::kSemantic)
#define REPRESENTATION_BITSET_TYPE_LIST(V) \
V(None, 0) \
- V(UntaggedInt1, 1 << 22 | kSemantic) \
- V(UntaggedInt8, 1 << 23 | kSemantic) \
- V(UntaggedInt16, 1 << 24 | kSemantic) \
- V(UntaggedInt32, 1 << 25 | kSemantic) \
- V(UntaggedFloat32, 1 << 26 | kSemantic) \
- V(UntaggedFloat64, 1 << 27 | kSemantic) \
- V(UntaggedPtr, 1 << 28 | kSemantic) \
- V(TaggedInt, 1 << 29 | kSemantic) \
- /* MSB has to be sign-extended */ \
- V(TaggedPtr, static_cast<int>(~0u << 30) | kSemantic) \
+ V(UntaggedInt1, 1u << 23 | kSemantic) \
+ V(UntaggedInt8, 1u << 24 | kSemantic) \
+ V(UntaggedInt16, 1u << 25 | kSemantic) \
+ V(UntaggedInt32, 1u << 26 | kSemantic) \
+ V(UntaggedFloat32, 1u << 27 | kSemantic) \
+ V(UntaggedFloat64, 1u << 28 | kSemantic) \
+ V(UntaggedPtr, 1u << 29 | kSemantic) \
+ V(TaggedInt, 1u << 30 | kSemantic) \
+ V(TaggedPtr, 1u << 31 | kSemantic) \
\
V(UntaggedInt, kUntaggedInt1 | kUntaggedInt8 | \
kUntaggedInt16 | kUntaggedInt32) \
@@ -166,34 +180,35 @@ namespace internal {
V(Tagged, kTaggedInt | kTaggedPtr)
#define SEMANTIC_BITSET_TYPE_LIST(V) \
- V(Null, 1 << 0 | REPRESENTATION(kTaggedPtr)) \
- V(Undefined, 1 << 1 | REPRESENTATION(kTaggedPtr)) \
- V(Boolean, 1 << 2 | REPRESENTATION(kTaggedPtr)) \
- V(UnsignedSmall, 1 << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherSignedSmall, 1 << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherUnsigned31, 1 << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherUnsigned32, 1 << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherSigned32, 1 << 7 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(MinusZero, 1 << 8 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(NaN, 1 << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherNumber, 1 << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(Symbol, 1 << 11 | REPRESENTATION(kTaggedPtr)) \
- V(InternalizedString, 1 << 12 | REPRESENTATION(kTaggedPtr)) \
- V(OtherString, 1 << 13 | REPRESENTATION(kTaggedPtr)) \
- V(Undetectable, 1 << 14 | REPRESENTATION(kTaggedPtr)) \
- V(Array, 1 << 15 | REPRESENTATION(kTaggedPtr)) \
- V(Buffer, 1 << 16 | REPRESENTATION(kTaggedPtr)) \
- V(Function, 1 << 17 | REPRESENTATION(kTaggedPtr)) \
- V(RegExp, 1 << 18 | REPRESENTATION(kTaggedPtr)) \
- V(OtherObject, 1 << 19 | REPRESENTATION(kTaggedPtr)) \
- V(Proxy, 1 << 20 | REPRESENTATION(kTaggedPtr)) \
- V(Internal, 1 << 21 | REPRESENTATION(kTagged | kUntagged)) \
+ V(Null, 1u << 1 | REPRESENTATION(kTaggedPtr)) \
+ V(Undefined, 1u << 2 | REPRESENTATION(kTaggedPtr)) \
+ V(Boolean, 1u << 3 | REPRESENTATION(kTaggedPtr)) \
+ V(UnsignedSmall, 1u << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSignedSmall, 1u << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherUnsigned31, 1u << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherUnsigned32, 1u << 7 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSigned32, 1u << 8 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(MinusZero, 1u << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(NaN, 1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherNumber, 1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Symbol, 1u << 12 | REPRESENTATION(kTaggedPtr)) \
+ V(InternalizedString, 1u << 13 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherString, 1u << 14 | REPRESENTATION(kTaggedPtr)) \
+ V(Undetectable, 1u << 15 | REPRESENTATION(kTaggedPtr)) \
+ V(Array, 1u << 16 | REPRESENTATION(kTaggedPtr)) \
+ V(Buffer, 1u << 17 | REPRESENTATION(kTaggedPtr)) \
+ V(Function, 1u << 18 | REPRESENTATION(kTaggedPtr)) \
+ V(RegExp, 1u << 19 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherObject, 1u << 20 | REPRESENTATION(kTaggedPtr)) \
+ V(Proxy, 1u << 21 | REPRESENTATION(kTaggedPtr)) \
+ V(Internal, 1u << 22 | REPRESENTATION(kTagged | kUntagged)) \
\
V(SignedSmall, kUnsignedSmall | kOtherSignedSmall) \
V(Signed32, kSignedSmall | kOtherUnsigned31 | kOtherSigned32) \
V(Unsigned32, kUnsignedSmall | kOtherUnsigned31 | kOtherUnsigned32) \
V(Integral32, kSigned32 | kUnsigned32) \
- V(Number, kIntegral32 | kMinusZero | kNaN | kOtherNumber) \
+ V(OrderedNumber, kIntegral32 | kMinusZero | kOtherNumber) \
+ V(Number, kOrderedNumber | kNaN) \
V(String, kInternalizedString | kOtherString) \
V(UniqueName, kSymbol | kInternalizedString) \
V(Name, kSymbol | kString) \
@@ -206,12 +221,36 @@ namespace internal {
V(Receiver, kObject | kProxy) \
V(NonNumber, kBoolean | kName | kNull | kReceiver | \
kUndefined | kInternal) \
- V(Any, -1)
+ V(Any, 0xfffffffeu)
+
+/*
+ * The following diagrams show how integers (in the mathematical sense) are
+ * divided among the different atomic numerical types.
+ *
+ * If SmiValuesAre31Bits():
+ *
+ * ON OS32 OSS US OU31 OU32 ON
+ * ______[_______[_______[_______[_______[_______[_______
+ * -2^31 -2^30 0 2^30 2^31 2^32
+ *
+ * Otherwise:
+ *
+ * ON OSS US OU32 ON
+ * ______[_______________[_______________[_______[_______
+ * -2^31 0 2^31 2^32
+ *
+ *
+ * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
+ *
+ */
+
+#define PROPER_BITSET_TYPE_LIST(V) \
+ REPRESENTATION_BITSET_TYPE_LIST(V) \
+ SEMANTIC_BITSET_TYPE_LIST(V)
#define BITSET_TYPE_LIST(V) \
MASK_BITSET_TYPE_LIST(V) \
- REPRESENTATION_BITSET_TYPE_LIST(V) \
- SEMANTIC_BITSET_TYPE_LIST(V)
+ PROPER_BITSET_TYPE_LIST(V)
// -----------------------------------------------------------------------------
@@ -228,11 +267,11 @@ namespace internal {
// static bool is_bitset(Type*);
// static bool is_class(Type*);
// static bool is_struct(Type*, int tag);
-// static int as_bitset(Type*);
+// static bitset as_bitset(Type*);
// static i::Handle<i::Map> as_class(Type*);
// static Handle<Struct>::type as_struct(Type*);
-// static Type* from_bitset(int bitset);
-// static Handle<Type>::type from_bitset(int bitset, Region*);
+// static Type* from_bitset(bitset);
+// static Handle<Type>::type from_bitset(bitset, Region*);
// static Handle<Type>::type from_class(i::Handle<Map>, Region*);
// static Handle<Type>::type from_struct(Handle<Struct>::type, int tag);
// static Handle<Struct>::type struct_create(int tag, int length, Region*);
@@ -251,9 +290,10 @@ class TypeImpl : public Config::Base {
public:
// Auxiliary types.
- class BitsetType; // Internal
- class StructuralType; // Internal
- class UnionType; // Internal
+ typedef uint32_t bitset; // Internal
+ class BitsetType; // Internal
+ class StructuralType; // Internal
+ class UnionType; // Internal
class ClassType;
class ConstantType;
@@ -275,21 +315,23 @@ class TypeImpl : public Config::Base {
// Constructors.
#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
- static TypeImpl* type() { return BitsetType::New(BitsetType::k##type); } \
+ static TypeImpl* type() { \
+ return BitsetType::New(BitsetType::k##type); \
+ } \
static TypeHandle type(Region* region) { \
return BitsetType::New(BitsetType::k##type, region); \
}
- BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+ PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
#undef DEFINE_TYPE_CONSTRUCTOR
static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
return ClassType::New(map, region);
}
static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
- // TODO(neis): Return RangeType for numerical values.
return ConstantType::New(value, region);
}
- static TypeHandle Range(double min, double max, Region* region) {
+ static TypeHandle Range(
+ i::Handle<i::Object> min, i::Handle<i::Object> max, Region* region) {
return RangeType::New(min, max, region);
}
static TypeHandle Context(TypeHandle outer, Region* region) {
@@ -357,7 +399,7 @@ class TypeImpl : public Config::Base {
template<class TypeHandle>
bool Equals(TypeHandle that) { return this->Equals(*that); }
- // Equivalent to Constant(value)->Is(this), but avoiding allocation.
+ // Equivalent to Constant(val)->Is(this), but avoiding allocation.
bool Contains(i::Object* val);
bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
@@ -404,6 +446,13 @@ class TypeImpl : public Config::Base {
ArrayType* AsArray() { return ArrayType::cast(this); }
FunctionType* AsFunction() { return FunctionType::cast(this); }
+ // Minimum and maximum of a numeric type.
+ // These functions do not distinguish between -0 and +0. If the type equals
+ // kNaN, they return NaN; otherwise kNaN is ignored. Only call these
+ // functions on subtypes of Number.
+ double Min();
+ double Max();
+
int NumClasses();
int NumConstants();
@@ -456,7 +505,7 @@ class TypeImpl : public Config::Base {
bool IsBitset() { return Config::is_bitset(this); }
bool IsUnion() { return Config::is_struct(this, StructuralType::kUnionTag); }
- int AsBitset() {
+ bitset AsBitset() {
DCHECK(this->IsBitset());
return static_cast<BitsetType*>(this)->Bitset();
}
@@ -464,18 +513,47 @@ class TypeImpl : public Config::Base {
// Auxiliary functions.
- int BitsetGlb() { return BitsetType::Glb(this); }
- int BitsetLub() { return BitsetType::Lub(this); }
- int InherentBitsetLub() { return BitsetType::InherentLub(this); }
+ bitset BitsetGlb() { return BitsetType::Glb(this); }
+ bitset BitsetLub() { return BitsetType::Lub(this); }
bool SlowIs(TypeImpl* that);
- TypeHandle Rebound(int bitset, Region* region);
- int BoundBy(TypeImpl* that);
- int IndexInUnion(int bound, UnionHandle unioned, int current_size);
- static int ExtendUnion(
- UnionHandle unioned, int current_size, TypeHandle t,
- TypeHandle other, bool is_intersect, Region* region);
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+ }
+ static bool IsInteger(i::Object* x) {
+ return x->IsNumber() && IsInteger(x->Number());
+ }
+
+ struct Limits {
+ i::Handle<i::Object> min;
+ i::Handle<i::Object> max;
+ Limits(i::Handle<i::Object> min, i::Handle<i::Object> max) :
+ min(min), max(max) {}
+ explicit Limits(RangeType* range) :
+ min(range->Min()), max(range->Max()) {}
+ };
+
+ static Limits Intersect(Limits lhs, Limits rhs);
+ static Limits Union(Limits lhs, Limits rhs);
+ static bool Overlap(RangeType* lhs, RangeType* rhs);
+ static bool Contains(RangeType* lhs, RangeType* rhs);
+ static bool Contains(RangeType* range, i::Object* val);
+
+ RangeType* GetRange();
+ static int UpdateRange(
+ RangeHandle type, UnionHandle result, int size, Region* region);
+
+ bool SimplyEquals(TypeImpl* that);
+ template<class TypeHandle>
+ bool SimplyEquals(TypeHandle that) { return this->SimplyEquals(*that); }
+
+ static int AddToUnion(
+ TypeHandle type, UnionHandle result, int size, Region* region);
+ static int IntersectAux(
+ TypeHandle type, TypeHandle other,
+ UnionHandle result, int size, Region* region);
+ static TypeHandle NormalizeUnion(UnionHandle unioned, int size);
};
@@ -494,36 +572,60 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
kUnusedEOL = 0
};
- int Bitset() { return Config::as_bitset(this); }
+ bitset Bitset() { return Config::as_bitset(this); }
- static TypeImpl* New(int bitset) {
- return static_cast<BitsetType*>(Config::from_bitset(bitset));
+ static TypeImpl* New(bitset bits) {
+ DCHECK(bits == kNone || IsInhabited(bits));
+ return Config::from_bitset(bits);
}
- static TypeHandle New(int bitset, Region* region) {
- return Config::from_bitset(bitset, region);
+ static TypeHandle New(bitset bits, Region* region) {
+ DCHECK(bits == kNone || IsInhabited(bits));
+ return Config::from_bitset(bits, region);
}
+ // TODO(neis): Eventually allow again for types with empty semantics
+ // part and modify intersection and possibly subtyping accordingly.
- static bool IsInhabited(int bitset) {
- return (bitset & kRepresentation) && (bitset & kSemantic);
+ static bool IsInhabited(bitset bits) {
+ return bits & kSemantic;
}
- static bool Is(int bitset1, int bitset2) {
- return (bitset1 | bitset2) == bitset2;
+ static bool Is(bitset bits1, bitset bits2) {
+ return (bits1 | bits2) == bits2;
}
- static int Glb(TypeImpl* type); // greatest lower bound that's a bitset
- static int Lub(TypeImpl* type); // least upper bound that's a bitset
- static int Lub(i::Object* value);
- static int Lub(double value);
- static int Lub(int32_t value);
- static int Lub(uint32_t value);
- static int Lub(i::Map* map);
- static int Lub(double min, double max);
- static int InherentLub(TypeImpl* type);
+ static double Min(bitset);
+ static double Max(bitset);
- static const char* Name(int bitset);
- static void Print(OStream& os, int bitset); // NOLINT
- using TypeImpl::PrintTo;
+ static bitset Glb(TypeImpl* type); // greatest lower bound that's a bitset
+ static bitset Lub(TypeImpl* type); // least upper bound that's a bitset
+ static bitset Lub(i::Object* value);
+ static bitset Lub(double value);
+ static bitset Lub(int32_t value);
+ static bitset Lub(uint32_t value);
+ static bitset Lub(i::Map* map);
+ static bitset Lub(Limits lim);
+
+ static const char* Name(bitset);
+ static void Print(OStream& os, bitset); // NOLINT
+#ifdef DEBUG
+ static void Print(bitset);
+#endif
+
+ private:
+ struct BitsetMin{
+ bitset bits;
+ double min;
+ };
+ static const BitsetMin BitsetMins31[];
+ static const BitsetMin BitsetMins32[];
+ static const BitsetMin* BitsetMins() {
+ return i::SmiValuesAre31Bits() ? BitsetMins31 : BitsetMins32;
+ }
+ static size_t BitsetMinsSize() {
+ return i::SmiValuesAre31Bits() ? 7 : 5;
+ /* arraysize(BitsetMins31) : arraysize(BitsetMins32); */
+ // Using arraysize here doesn't compile on Windows.
+ }
};
@@ -610,35 +712,25 @@ template<class Config>
class TypeImpl<Config>::ClassType : public StructuralType {
public:
TypeHandle Bound(Region* region) {
- return Config::is_class(this)
- ? BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region)
- : this->Get(0);
+ return Config::is_class(this) ?
+ BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region) :
+ this->Get(0);
}
i::Handle<i::Map> Map() {
- return Config::is_class(this)
- ? Config::as_class(this)
- : this->template GetValue<i::Map>(1);
- }
-
- static ClassHandle New(
- i::Handle<i::Map> map, TypeHandle bound, Region* region) {
- DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::Lub(*map)));
- ClassHandle type = Config::template cast<ClassType>(
- StructuralType::New(StructuralType::kClassTag, 2, region));
- type->Set(0, bound);
- type->SetValue(1, map);
- return type;
+ return Config::is_class(this) ? Config::as_class(this) :
+ this->template GetValue<i::Map>(1);
}
static ClassHandle New(i::Handle<i::Map> map, Region* region) {
ClassHandle type =
Config::template cast<ClassType>(Config::from_class(map, region));
- if (type->IsClass()) {
- return type;
- } else {
- TypeHandle bound = BitsetType::New(BitsetType::Lub(*map), region);
- return New(map, bound, region);
+ if (!type->IsClass()) {
+ type = Config::template cast<ClassType>(
+ StructuralType::New(StructuralType::kClassTag, 2, region));
+ type->Set(0, BitsetType::New(BitsetType::Lub(*map), region));
+ type->SetValue(1, map);
}
+ return type;
}
static ClassType* cast(TypeImpl* type) {
@@ -657,26 +749,21 @@ class TypeImpl<Config>::ConstantType : public StructuralType {
TypeHandle Bound() { return this->Get(0); }
i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); }
- static ConstantHandle New(
- i::Handle<i::Object> value, TypeHandle bound, Region* region) {
- DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::Lub(*value)));
+ static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
ConstantHandle type = Config::template cast<ConstantType>(
StructuralType::New(StructuralType::kConstantTag, 2, region));
- type->Set(0, bound);
+ type->Set(0, BitsetType::New(BitsetType::Lub(*value), region));
type->SetValue(1, value);
return type;
}
- static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
- TypeHandle bound = BitsetType::New(BitsetType::Lub(*value), region);
- return New(value, bound, region);
- }
-
static ConstantType* cast(TypeImpl* type) {
DCHECK(type->IsConstant());
return static_cast<ConstantType*>(type);
}
};
+// TODO(neis): Also cache value if numerical.
+// TODO(neis): Allow restricting the representation.
// -----------------------------------------------------------------------------
@@ -685,27 +772,23 @@ class TypeImpl<Config>::ConstantType : public StructuralType {
template<class Config>
class TypeImpl<Config>::RangeType : public StructuralType {
public:
- TypeHandle Bound() { return this->Get(0); }
- double Min() { return this->template GetValue<i::HeapNumber>(1)->value(); }
- double Max() { return this->template GetValue<i::HeapNumber>(2)->value(); }
+ int BitsetLub() { return this->Get(0)->AsBitset(); }
+ i::Handle<i::Object> Min() { return this->template GetValue<i::Object>(1); }
+ i::Handle<i::Object> Max() { return this->template GetValue<i::Object>(2); }
static RangeHandle New(
- double min, double max, TypeHandle bound, Region* region) {
- DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::Lub(min, max)));
+ i::Handle<i::Object> min, i::Handle<i::Object> max, Region* region) {
+ DCHECK(min->Number() <= max->Number());
RangeHandle type = Config::template cast<RangeType>(
StructuralType::New(StructuralType::kRangeTag, 3, region));
- type->Set(0, bound);
- Factory* factory = Config::isolate(region)->factory();
- Handle<HeapNumber> minV = factory->NewHeapNumber(min);
- Handle<HeapNumber> maxV = factory->NewHeapNumber(max);
- type->SetValue(1, minV);
- type->SetValue(2, maxV);
+ type->Set(0, BitsetType::New(BitsetType::Lub(Limits(min, max)), region));
+ type->SetValue(1, min);
+ type->SetValue(2, max);
return type;
}
- static RangeHandle New(double min, double max, Region* region) {
- TypeHandle bound = BitsetType::New(BitsetType::Lub(min, max), region);
- return New(min, max, bound, region);
+ static RangeHandle New(Limits lim, Region* region) {
+ return New(lim.min, lim.max, region);
}
static RangeType* cast(TypeImpl* type) {
@@ -713,6 +796,8 @@ class TypeImpl<Config>::RangeType : public StructuralType {
return static_cast<RangeType*>(type);
}
};
+// TODO(neis): Also cache min and max values.
+// TODO(neis): Allow restricting the representation.
// -----------------------------------------------------------------------------
@@ -721,25 +806,15 @@ class TypeImpl<Config>::RangeType : public StructuralType {
template<class Config>
class TypeImpl<Config>::ContextType : public StructuralType {
public:
- TypeHandle Bound() { return this->Get(0); }
- TypeHandle Outer() { return this->Get(1); }
+ TypeHandle Outer() { return this->Get(0); }
- static ContextHandle New(TypeHandle outer, TypeHandle bound, Region* region) {
- DCHECK(BitsetType::Is(
- bound->AsBitset(), BitsetType::kInternal & BitsetType::kTaggedPtr));
+ static ContextHandle New(TypeHandle outer, Region* region) {
ContextHandle type = Config::template cast<ContextType>(
- StructuralType::New(StructuralType::kContextTag, 2, region));
- type->Set(0, bound);
- type->Set(1, outer);
+ StructuralType::New(StructuralType::kContextTag, 1, region));
+ type->Set(0, outer);
return type;
}
- static ContextHandle New(TypeHandle outer, Region* region) {
- TypeHandle bound = BitsetType::New(
- BitsetType::kInternal & BitsetType::kTaggedPtr, region);
- return New(outer, bound, region);
- }
-
static ContextType* cast(TypeImpl* type) {
DCHECK(type->IsContext());
return static_cast<ContextType*>(type);
@@ -753,23 +828,15 @@ class TypeImpl<Config>::ContextType : public StructuralType {
template<class Config>
class TypeImpl<Config>::ArrayType : public StructuralType {
public:
- TypeHandle Bound() { return this->Get(0); }
- TypeHandle Element() { return this->Get(1); }
+ TypeHandle Element() { return this->Get(0); }
- static ArrayHandle New(TypeHandle element, TypeHandle bound, Region* region) {
- DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::kArray));
+ static ArrayHandle New(TypeHandle element, Region* region) {
ArrayHandle type = Config::template cast<ArrayType>(
- StructuralType::New(StructuralType::kArrayTag, 2, region));
- type->Set(0, bound);
- type->Set(1, element);
+ StructuralType::New(StructuralType::kArrayTag, 1, region));
+ type->Set(0, element);
return type;
}
- static ArrayHandle New(TypeHandle element, Region* region) {
- TypeHandle bound = BitsetType::New(BitsetType::kArray, region);
- return New(element, bound, region);
- }
-
static ArrayType* cast(TypeImpl* type) {
DCHECK(type->IsArray());
return static_cast<ArrayType*>(type);
@@ -783,32 +850,22 @@ class TypeImpl<Config>::ArrayType : public StructuralType {
template<class Config>
class TypeImpl<Config>::FunctionType : public StructuralType {
public:
- int Arity() { return this->Length() - 3; }
- TypeHandle Bound() { return this->Get(0); }
- TypeHandle Result() { return this->Get(1); }
- TypeHandle Receiver() { return this->Get(2); }
- TypeHandle Parameter(int i) { return this->Get(3 + i); }
+ int Arity() { return this->Length() - 2; }
+ TypeHandle Result() { return this->Get(0); }
+ TypeHandle Receiver() { return this->Get(1); }
+ TypeHandle Parameter(int i) { return this->Get(2 + i); }
- void InitParameter(int i, TypeHandle type) { this->Set(3 + i, type); }
+ void InitParameter(int i, TypeHandle type) { this->Set(2 + i, type); }
static FunctionHandle New(
- TypeHandle result, TypeHandle receiver, TypeHandle bound,
- int arity, Region* region) {
- DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::kFunction));
+ TypeHandle result, TypeHandle receiver, int arity, Region* region) {
FunctionHandle type = Config::template cast<FunctionType>(
- StructuralType::New(StructuralType::kFunctionTag, 3 + arity, region));
- type->Set(0, bound);
- type->Set(1, result);
- type->Set(2, receiver);
+ StructuralType::New(StructuralType::kFunctionTag, 2 + arity, region));
+ type->Set(0, result);
+ type->Set(1, receiver);
return type;
}
- static FunctionHandle New(
- TypeHandle result, TypeHandle receiver, int arity, Region* region) {
- TypeHandle bound = BitsetType::New(BitsetType::kFunction, region);
- return New(result, receiver, bound, arity, region);
- }
-
static FunctionType* cast(TypeImpl* type) {
DCHECK(type->IsFunction());
return static_cast<FunctionType*>(type);
@@ -853,11 +910,6 @@ struct ZoneTypeConfig {
typedef i::Zone Region;
template<class T> struct Handle { typedef T* type; };
- // TODO(neis): This will be removed again once we have struct_get_double().
- static inline i::Isolate* isolate(Region* region) {
- return region->isolate();
- }
-
template<class T> static inline T* handle(T* type);
template<class T> static inline T* cast(Type* type);
@@ -865,12 +917,12 @@ struct ZoneTypeConfig {
static inline bool is_class(Type* type);
static inline bool is_struct(Type* type, int tag);
- static inline int as_bitset(Type* type);
+ static inline Type::bitset as_bitset(Type* type);
static inline i::Handle<i::Map> as_class(Type* type);
static inline Struct* as_struct(Type* type);
- static inline Type* from_bitset(int bitset);
- static inline Type* from_bitset(int bitset, Zone* zone);
+ static inline Type* from_bitset(Type::bitset);
+ static inline Type* from_bitset(Type::bitset, Zone* zone);
static inline Type* from_class(i::Handle<i::Map> map, Zone* zone);
static inline Type* from_struct(Struct* structured);
@@ -900,11 +952,6 @@ struct HeapTypeConfig {
typedef i::Isolate Region;
template<class T> struct Handle { typedef i::Handle<T> type; };
- // TODO(neis): This will be removed again once we have struct_get_double().
- static inline i::Isolate* isolate(Region* region) {
- return region;
- }
-
template<class T> static inline i::Handle<T> handle(T* type);
template<class T> static inline i::Handle<T> cast(i::Handle<Type> type);
@@ -912,12 +959,12 @@ struct HeapTypeConfig {
static inline bool is_class(Type* type);
static inline bool is_struct(Type* type, int tag);
- static inline int as_bitset(Type* type);
+ static inline Type::bitset as_bitset(Type* type);
static inline i::Handle<i::Map> as_class(Type* type);
static inline i::Handle<Struct> as_struct(Type* type);
- static inline Type* from_bitset(int bitset);
- static inline i::Handle<Type> from_bitset(int bitset, Isolate* isolate);
+ static inline Type* from_bitset(Type::bitset);
+ static inline i::Handle<Type> from_bitset(Type::bitset, Isolate* isolate);
static inline i::Handle<Type> from_class(
i::Handle<i::Map> map, Isolate* isolate);
static inline i::Handle<Type> from_struct(i::Handle<Struct> structure);
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 136f72271c..02c9603efe 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -352,6 +352,9 @@ void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
}
+void AstTyper::VisitClassLiteral(ClassLiteral* expr) {}
+
+
void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
@@ -723,6 +726,9 @@ void AstTyper::VisitThisFunction(ThisFunction* expr) {
}
+void AstTyper::VisitSuperReference(SuperReference* expr) {}
+
+
void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
for (int i = 0; i < decls->length(); ++i) {
Declaration* decl = decls->at(i);
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index ffc659fa10..619c3c9c36 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -5,8 +5,8 @@
#ifndef V8_HYDROGEN_UNIQUE_H_
#define V8_HYDROGEN_UNIQUE_H_
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles-inl.h" // TODO(everyone): Fix our inl.h crap
+#include "src/objects-inl.h" // TODO(everyone): Fix our inl.h crap
#include "src/string-stream.h"
#include "src/utils.h"
#include "src/zone.h"
@@ -32,6 +32,8 @@ class UniqueSet;
template <typename T>
class Unique {
public:
+ Unique<T>() : raw_address_(NULL) {}
+
// TODO(titzer): make private and introduce a uniqueness scope.
explicit Unique(Handle<T> handle) {
if (handle.is_null()) {
@@ -118,12 +120,8 @@ class Unique {
friend class UniqueSet<T>; // Uses internal details for speed.
template <class U>
friend class Unique; // For comparing raw_address values.
- template <class U>
- friend class PrintableUnique; // For automatic up casting.
protected:
- Unique<T>() : raw_address_(NULL) { }
-
Address raw_address_;
Handle<T> handle_;
@@ -131,72 +129,8 @@ class Unique {
};
-// TODO(danno): At some point if all of the uses of Unique end up using
-// PrintableUnique, then we should merge PrintableUnique into Unique and
-// predicate generating the printable string on a "am I tracing" check.
-template <class T>
-class PrintableUnique : public Unique<T> {
- public:
- // TODO(titzer): make private and introduce a uniqueness scope.
- explicit PrintableUnique(Zone* zone, Handle<T> handle) : Unique<T>(handle) {
- InitializeString(zone);
- }
-
- // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
- PrintableUnique(Zone* zone, Address raw_address, Handle<T> handle)
- : Unique<T>(raw_address, handle) {
- InitializeString(zone);
- }
-
- // Constructor for handling automatic up casting.
- // Eg. PrintableUnique<JSFunction> can be passed when PrintableUnique<Object>
- // is expected.
- template <class S>
- PrintableUnique(PrintableUnique<S> uniq) // NOLINT
- : Unique<T>(Handle<T>()) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
-#endif
- this->raw_address_ = uniq.raw_address_;
- this->handle_ = uniq.handle_;
- string_ = uniq.string();
- }
-
- // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
- static PrintableUnique<T> CreateUninitialized(Zone* zone, Handle<T> handle) {
- return PrintableUnique<T>(zone, reinterpret_cast<Address>(NULL), handle);
- }
-
- static PrintableUnique<T> CreateImmovable(Zone* zone, Handle<T> handle) {
- return PrintableUnique<T>(zone, reinterpret_cast<Address>(*handle), handle);
- }
-
- const char* string() const { return string_; }
-
- private:
- const char* string_;
-
- void InitializeString(Zone* zone) {
- // The stringified version of the parameter must be calculated when the
- // Operator is constructed to avoid accessing the heap.
- HeapStringAllocator temp_allocator;
- StringStream stream(&temp_allocator);
- this->handle_->ShortPrint(&stream);
- SmartArrayPointer<const char> desc_string = stream.ToCString();
- const char* desc_chars = desc_string.get();
- int length = static_cast<int>(strlen(desc_chars));
- char* desc_copy = zone->NewArray<char>(length + 1);
- memcpy(desc_copy, desc_chars, length + 1);
- string_ = desc_copy;
- }
-};
-
-
template <typename T>
-class UniqueSet V8_FINAL : public ZoneObject {
+class UniqueSet FINAL : public ZoneObject {
public:
// Constructor. A new set will be empty.
UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 4b7d1f7e00..09079bc29f 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -172,10 +172,10 @@
throw new $URIError("URI malformed");
}
if (value < 0x10000) {
- %_TwoByteSeqStringSetChar(result, index++, value);
+ %_TwoByteSeqStringSetChar(index++, value, result);
} else {
- %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
- %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
+ %_TwoByteSeqStringSetChar(index++, (value >> 10) + 0xd7c0, result);
+ %_TwoByteSeqStringSetChar(index++, (value & 0x3ff) + 0xdc00, result);
}
return index;
}
@@ -205,7 +205,7 @@
var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
for (var i = 0; i < array.length; i++) {
- %_OneByteSeqStringSetChar(result, i, array[i]);
+ %_OneByteSeqStringSetChar(i, array[i], result);
}
return result;
}
@@ -217,24 +217,24 @@
var index = 0;
var k = 0;
- // Optimistically assume ascii string.
+ // Optimistically assume one-byte string.
for ( ; k < uriLength; k++) {
var code = uri.charCodeAt(k);
if (code == 37) { // '%'
if (k + 2 >= uriLength) throw new $URIError("URI malformed");
var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
- if (cc >> 7) break; // Assumption wrong, two byte string.
+ if (cc >> 7) break; // Assumption wrong, two-byte string.
if (reserved(cc)) {
- %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+ %_OneByteSeqStringSetChar(index++, 37, one_byte); // '%'.
+ %_OneByteSeqStringSetChar(index++, uri.charCodeAt(k+1), one_byte);
+ %_OneByteSeqStringSetChar(index++, uri.charCodeAt(k+2), one_byte);
} else {
- %_OneByteSeqStringSetChar(one_byte, index++, cc);
+ %_OneByteSeqStringSetChar(index++, cc, one_byte);
}
k += 2;
} else {
- if (code > 0x7f) break; // Assumption wrong, two byte string.
- %_OneByteSeqStringSetChar(one_byte, index++, code);
+ if (code > 0x7f) break; // Assumption wrong, two-byte string.
+ %_OneByteSeqStringSetChar(index++, code, one_byte);
}
}
@@ -264,14 +264,14 @@
}
index = URIDecodeOctets(octets, two_byte, index);
} else if (reserved(cc)) {
- %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
+ %_TwoByteSeqStringSetChar(index++, 37, two_byte); // '%'.
+ %_TwoByteSeqStringSetChar(index++, uri.charCodeAt(k - 1), two_byte);
+ %_TwoByteSeqStringSetChar(index++, uri.charCodeAt(k), two_byte);
} else {
- %_TwoByteSeqStringSetChar(two_byte, index++, cc);
+ %_TwoByteSeqStringSetChar(index++, cc, two_byte);
}
} else {
- %_TwoByteSeqStringSetChar(two_byte, index++, code);
+ %_TwoByteSeqStringSetChar(index++, code, two_byte);
}
}
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index c5012bbc25..2991815e57 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -12,6 +12,7 @@
#include "include/v8.h"
#include "src/allocation.h"
+#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -25,9 +26,16 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
+
+// Same as strcmp, but can handle NULL arguments.
+inline bool CStringEquals(const char* s1, const char* s2) {
+ return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
+}
+
+
// X must be a power of 2. Returns the number of trailing zeros.
inline int WhichPowerOf2(uint32_t x) {
- DCHECK(IsPowerOf2(x));
+ DCHECK(base::bits::IsPowerOfTwo32(x));
int bits = 0;
#ifdef DEBUG
int original_x = x;
@@ -111,6 +119,12 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
}
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
+ return (value & (alignment - 1)) == 0;
+}
+
+
// Returns true if (addr + offset) is aligned.
inline bool IsAddressAligned(Address addr,
intptr_t alignment,
@@ -667,26 +681,17 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
};
-// Compare ASCII/16bit chars to ASCII/16bit chars.
+// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs,
const rchar* rhs,
int chars) {
const lchar* limit = lhs + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*lhs) == sizeof(*rhs)) {
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
- while (lhs <= limit - kStepSize) {
- if (*reinterpret_cast<const uintptr_t*>(lhs) !=
- *reinterpret_cast<const uintptr_t*>(rhs)) {
- break;
- }
- lhs += kStepSize;
- rhs += kStepSize;
- }
+ if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
+ // memcmp compares byte-by-byte, yielding wrong results for two-byte
+ // strings on little-endian systems.
+ return memcmp(lhs, rhs, chars);
}
-#endif
while (lhs < limit) {
int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
if (r != 0) return r;
@@ -734,62 +739,6 @@ inline int TenToThe(int exponent) {
}
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits. Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer. This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type. Of course the end result is likely to
-// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize BitCast away.
-//
-// There is an additional use for BitCast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule. If you have checked that there is no breakage
-// you can use BitCast to cast one pointer type to another. This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-
-// We need different implementations of BitCast for pointer and non-pointer
-// values. We use partial specialization of auxiliary struct to work around
-// issues with template functions overloading.
-template <class Dest, class Source>
-struct BitCastHelper {
- STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
-
- INLINE(static Dest cast(const Source& source)) {
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
- }
-};
-
-template <class Dest, class Source>
-struct BitCastHelper<Dest, Source*> {
- INLINE(static Dest cast(Source* source)) {
- return BitCastHelper<Dest, uintptr_t>::
- cast(reinterpret_cast<uintptr_t>(source));
- }
-};
-
-template <class Dest, class Source>
-INLINE(Dest BitCast(const Source& source));
-
-template <class Dest, class Source>
-inline Dest BitCast(const Source& source) {
- return BitCastHelper<Dest, Source>::cast(source);
-}
-
-
template<typename ElementType, int NumElements>
class EmbeddedContainer {
public:
@@ -1277,21 +1226,6 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
}
-// Simple wrapper that allows an ExternalString to refer to a
-// Vector<const char>. Doesn't assume ownership of the data.
-class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
- public:
- explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
-
- virtual const char* data() const { return data_.start(); }
-
- virtual size_t length() const { return data_.length(); }
-
- private:
- Vector<const char> data_;
-};
-
-
// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exits tells whether the file existed.
@@ -1316,7 +1250,7 @@ INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#endif
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
+// Copy from 8bit/16bit chars to 8bit/16bit chars.
template <typename sourcechar, typename sinkchar>
INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
@@ -1350,25 +1284,11 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- if (sizeof(*dest) == sizeof(*src)) {
- if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
- MemCopy(dest, src, chars * sizeof(*dest));
- return;
- }
- // Number of characters in a uintptr_t.
- static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- DCHECK(dest + kStepSize > dest); // Check for overflow.
- while (dest + kStepSize <= limit) {
- *reinterpret_cast<uintptr_t*>(dest) =
- *reinterpret_cast<const uintptr_t*>(src);
- dest += kStepSize;
- src += kStepSize;
- }
- }
-#endif
- while (dest < limit) {
- *dest++ = static_cast<sinkchar>(*src++);
+ if ((sizeof(*dest) == sizeof(*src)) &&
+ (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
+ MemCopy(dest, src, chars * sizeof(*dest));
+ } else {
+ while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
}
}
@@ -1535,6 +1455,16 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index) {
}
-} } // namespace v8::internal
+// Returns current value of top of the stack. Works correctly with ASAN.
+DISABLE_ASAN
+inline uintptr_t GetCurrentStackPosition() {
+ // Takes the address of the limit variable in order to find out where
+ // the top of stack is right now.
+ uintptr_t limit = reinterpret_cast<uintptr_t>(&limit);
+ return limit;
+}
+
+} // namespace internal
+} // namespace v8
#endif // V8_UTILS_H_
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 13a576b1c9..62c3da4cf7 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -33,14 +33,9 @@ v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
v8::Platform* V8::platform_ = NULL;
-bool V8::Initialize(Deserializer* des) {
+bool V8::Initialize() {
InitializeOncePerProcess();
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) return true;
- if (isolate->IsDead()) return false;
- if (isolate->IsInitialized()) return true;
-
- return isolate->Init(des);
+ return true;
}
@@ -52,8 +47,8 @@ void V8::TearDown() {
ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
-
Sampler::TearDown();
+ FlagList::ResetAllFlags(); // Frees memory held by string arguments.
}
@@ -79,6 +74,8 @@ void V8::InitializeOncePerProcessImpl() {
base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
+ Isolate::InitializeOncePerProcess();
+
Sampler::SetUp();
CpuFeatures::Probe(false);
init_memcopy_functions();
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 8ae75fb854..13c33e1860 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -49,17 +49,11 @@
namespace v8 {
namespace internal {
-class Deserializer;
-
class V8 : public AllStatic {
public:
// Global actions.
- // If Initialize is called with des == NULL, the initial state is
- // created from scratch. If a non-null Deserializer is given, the
- // initial state is created by reading the deserialized data into an
- // empty heap.
- static bool Initialize(Deserializer* des);
+ static bool Initialize();
static void TearDown();
// Report process out of memory. Implementation found in api.cc.
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 9612f16f96..782b953ea9 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -571,10 +571,6 @@ SetUpLockedPrototype(PropertyDescriptor, $Array(
// property descriptor. For a description of the array layout please
// see the runtime.cc file.
function ConvertDescriptorArrayToDescriptor(desc_array) {
- if (desc_array === false) {
- throw 'Internal error: invalid desc_array';
- }
-
if (IS_UNDEFINED(desc_array)) {
return UNDEFINED;
}
@@ -649,9 +645,6 @@ function GetOwnPropertyJS(obj, v) {
// If p is not a property on obj undefined is returned.
var props = %GetOwnProperty(ToObject(obj), p);
- // A false value here means that access checks failed.
- if (props === false) return UNDEFINED;
-
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -692,11 +685,8 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p));
- // A false value here means that access checks failed.
- if (current_or_access === false) return UNDEFINED;
-
- var current = ConvertDescriptorArrayToDescriptor(current_or_access);
+ var current_array = %GetOwnProperty(ToObject(obj), ToName(p));
+ var current = ConvertDescriptorArrayToDescriptor(current_array);
var extensible = %IsExtensible(ToObject(obj));
// Error handling according to spec.
@@ -840,8 +830,18 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
// property.
// Step 12 - updating an existing accessor property with an accessor
// descriptor.
- var getter = desc.hasGetter() ? desc.getGet() : null;
- var setter = desc.hasSetter() ? desc.getSet() : null;
+ var getter = null;
+ if (desc.hasGetter()) {
+ getter = desc.getGet();
+ } else if (IsAccessorDescriptor(current) && current.hasGetter()) {
+ getter = current.getGet();
+ }
+ var setter = null;
+ if (desc.hasSetter()) {
+ setter = desc.getSet();
+ } else if (IsAccessorDescriptor(current) && current.hasSetter()) {
+ setter = current.getSet();
+ }
%DefineAccessorPropertyUnchecked(obj, p, getter, setter, flag);
}
return true;
@@ -1757,7 +1757,11 @@ function FunctionSourceString(func) {
var name = %FunctionNameShouldPrintAsAnonymous(func)
? 'anonymous'
: %FunctionGetName(func);
- var head = %FunctionIsGenerator(func) ? 'function* ' : 'function ';
+
+ var isGenerator = %FunctionIsGenerator(func);
+ var head = %FunctionIsConciseMethod(func)
+ ? (isGenerator ? '*' : '')
+ : (isGenerator ? 'function* ' : 'function ');
return head + name + source;
}
@@ -1855,9 +1859,7 @@ function FunctionConstructor(arg1) { // length == 1
var global_proxy = %GlobalProxy(global);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
- var f = %CompileString(source, true);
- if (!IS_FUNCTION(f)) return f;
- f = %_CallFunction(global_proxy, f);
+ var f = %_CallFunction(global_proxy, %CompileString(source, true));
%FunctionMarkNameShouldPrintAsAnonymous(f);
return f;
}
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 010f50b3e4..a46b289ba1 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -307,6 +307,9 @@ void ThreadManager::EagerlyArchiveThread() {
void ThreadManager::FreeThreadResources() {
+ DCHECK(!isolate_->has_pending_exception());
+ DCHECK(!isolate_->external_caught_exception());
+ DCHECK(isolate_->try_catch_handler() == NULL);
isolate_->handle_scope_implementer()->FreeThreadResources();
isolate_->FreeThreadResources();
isolate_->debug()->FreeThreadResources();
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index e12b916106..d3ba7759a6 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -151,9 +151,9 @@ inline int StrLength(const char* string) {
}
-#define STATIC_ASCII_VECTOR(x) \
+#define STATIC_CHAR_VECTOR(x) \
v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
- ARRAY_SIZE(x)-1)
+ arraysize(x) - 1)
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, StrLength(data));
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index c6f087d04b..7ed073ca54 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 28
-#define BUILD_NUMBER 73
-#define PATCH_LEVEL 0
+#define MINOR_VERSION 29
+#define BUILD_NUMBER 93
+#define PATCH_LEVEL 1
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/weak_collection.js b/deps/v8/src/weak-collection.js
index 73dd9de6ba..1160176d66 100644
--- a/deps/v8/src/weak_collection.js
+++ b/deps/v8/src/weak-collection.js
@@ -23,7 +23,7 @@ function WeakMapConstructor(iterable) {
var iter, adder;
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(iterable);
+ iter = GetIterator(ToObject(iterable));
adder = this.set;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['set', this]);
@@ -139,7 +139,7 @@ function WeakSetConstructor(iterable) {
var iter, adder;
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(iterable);
+ iter = GetIterator(ToObject(iterable));
adder = this.add;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['add', this]);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index d13c21f4b7..ce68524524 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -6,6 +6,7 @@
#if V8_TARGET_ARCH_X64
+#include "src/base/bits.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
@@ -161,7 +162,7 @@ Operand::Operand(const Operand& operand, int32_t offset) {
int32_t disp_value = 0;
if (mode == 0x80 || is_baseless) {
// Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
+ disp_value = *bit_cast<const int32_t*>(&operand.buf_[disp_offset]);
} else if (mode == 0x40) {
// Mode 1: Byte displacement.
disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
@@ -265,7 +266,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(IsPowerOf2(m));
+ DCHECK(base::bits::IsPowerOfTwo32(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
Nop(delta);
}
@@ -941,6 +942,20 @@ void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
}
+void Assembler::emit_imul(Register dst, const Operand& src, Immediate imm,
+ int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, src, size);
+ if (is_int8(imm.value_)) {
+ emit(0x6B);
+ } else {
+ emit(0x69);
+ }
+ emit_operand(dst, src);
+ emit(imm.value_);
+}
+
+
void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -1176,6 +1191,7 @@ void Assembler::movb(Register dst, const Operand& src) {
void Assembler::movb(Register dst, Immediate imm) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(dst);
}
emit(0xB0 + dst.low_bits());
@@ -1186,6 +1202,7 @@ void Assembler::movb(Register dst, Immediate imm) {
void Assembler::movb(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
if (!src.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(src, dst);
} else {
emit_optional_rex_32(src, dst);
@@ -1397,7 +1414,12 @@ void Assembler::emit_movzxb(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
- emit_optional_rex_32(dst, src);
+ if (!src.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst, src);
+ } else {
+ emit_optional_rex_32(dst, src);
+ }
emit(0x0F);
emit(0xB6);
emit_modrm(dst, src);
@@ -1653,7 +1675,8 @@ void Assembler::setcc(Condition cc, Register reg) {
}
EnsureSpace ensure_space(this);
DCHECK(is_uint4(cc));
- if (!reg.is_byte_register()) { // Use x64 byte registers, where different.
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(reg);
}
emit(0x0F);
@@ -2620,6 +2643,16 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
}
+void Assembler::cvttsd2siq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2891,6 +2924,12 @@ void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
}
+void Assembler::emit_sse_operand(Register reg, const Operand& adr) {
+ Register ireg = {reg.code()};
+ emit_operand(ireg, adr);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 3896f8923d..529b100e24 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -200,6 +200,11 @@ struct XMMRegister {
return kMaxNumAllocatableRegisters;
}
+ // TODO(turbofan): Proper support for float32.
+ static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
static int ToAllocationIndex(XMMRegister reg) {
DCHECK(reg.code() != 0);
return reg.code() - 1;
@@ -1048,6 +1053,7 @@ class Assembler : public AssemblerBase {
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
+ void cvttsd2siq(Register dst, const Operand& src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
void cvtlsi2sd(XMMRegister dst, Register src);
@@ -1316,6 +1322,7 @@ class Assembler : public AssemblerBase {
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(Register reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
@@ -1447,6 +1454,7 @@ class Assembler : public AssemblerBase {
void emit_imul(Register dst, Register src, int size);
void emit_imul(Register dst, const Operand& src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
+ void emit_imul(Register dst, const Operand& src, Immediate imm, int size);
void emit_inc(Register dst, int size);
void emit_inc(const Operand& dst, int size);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index a18747d50d..194d8a6b2e 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -6,10 +6,10 @@
#if V8_TARGET_ARCH_X64
+#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -609,8 +609,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
@@ -1066,8 +1066,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Copy all arguments from the array to the stack.
Label entry, loop;
- Register receiver = LoadIC::ReceiverRegister();
- Register key = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
__ movp(key, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
@@ -1075,10 +1075,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use inline caching to speed up access to arguments.
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(0));
+ __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(0));
}
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
__ Call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 5a30ab70a8..7d1e4f5e0d 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -8,357 +8,105 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rbx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rdi };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax, rbx, rcx };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
-
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax, rbx, rcx, rdx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rbx, rdx };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = {rsi, rdi};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // rax : number of arguments
- // rbx : feedback vector
- // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
- // rdi : constructor function
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {rsi, rax, rdi, rbx};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rcx, rbx, rax };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax, rbx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return rsi; }
-
-
static void InitializeArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // rax -- number of arguments
- // rdi -- function
- // rbx -- allocation site with elements kind
Address deopt_handler = Runtime::FunctionForId(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { rsi, rdi, rbx };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { rsi, rdi, rbx, rax };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, rax,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // rsi -- context
- // rax -- number of arguments
- // rdi -- constructor function
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { rsi, rdi };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { rsi, rdi, rax };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, rax,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
-}
-
-
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rdx, rax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rcx, rdx, rax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { rsi, rdx, rax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { rsi, // context
- rdi, // JSFunction
- rax, // actual number of arguments
- rbx, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { rsi, // context
- rcx, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { rsi, // context
- rcx, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { rsi, // context
- rdx, // receiver
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { rsi, // context
- rax, // callee
- rbx, // call_data
- rcx, // holder
- rdx, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- rax.is(descriptor->GetEnvironmentParameterRegister(
- param_count - 1)));
+ rax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ Push(descriptor->GetEnvironmentParameterRegister(i));
+ __ Push(descriptor.GetEnvironmentParameterRegister(i));
}
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -367,7 +115,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- __ PushCallerSaved(save_doubles_);
+ __ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
const int argument_count = 1;
__ PrepareCallCFunction(argument_count);
__ LoadAddress(arg_reg_1,
@@ -377,7 +125,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- __ PopCallerSaved(save_doubles_);
+ __ PopCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
__ ret(0);
}
@@ -512,7 +260,8 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register exponent = rdx;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(rdx));
const Register base = rax;
const Register scratch = rcx;
const XMMRegister double_result = xmm3;
@@ -526,7 +275,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
@@ -556,7 +305,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiToInteger32(exponent, exponent);
__ jmp(&int_exponent);
@@ -565,11 +314,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
__ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
+ &try_arithmetic_simplification,
+ &try_arithmetic_simplification);
__ jmp(&int_exponent);
__ bind(&try_arithmetic_simplification);
@@ -578,7 +329,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmpl(exponent, Immediate(0x1));
__ j(overflow, &call_runtime);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
@@ -737,7 +488,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Returning or bailing out.
Counters* counters = isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
@@ -772,7 +523,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
r9, &miss);
@@ -784,6 +535,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
+ DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smi.
Label slow;
@@ -1083,6 +836,37 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register scratch = rax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+ // Check that the key is an array index, that is Uint32.
+ STATIC_ASSERT(kSmiValueSize <= 32);
+ __ JumpUnlessNonNegativeSmi(key, &slow);
+
+ // Everything is fine, call runtime.
+ __ PopReturnAddressTo(scratch);
+ __ Push(receiver); // receiver
+ __ Push(key); // key
+ __ PushReturnAddressFrom(scratch);
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
@@ -1333,7 +1117,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) One byte sequential. Load regexp code for one byte.
__ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
- __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataOneByteCodeOffset));
__ Set(rcx, 1); // Type is one byte.
// (E) Carry on. String handling is done.
@@ -1346,7 +1130,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdi: sequential subject string (or look-alike, external string)
// r15: original subject string
- // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
@@ -1361,7 +1145,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdi: subject string
// rbx: previous index
- // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
+ // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
Counters* counters = isolate()->counters();
@@ -1410,7 +1194,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdi: subject string
// rbx: previous index
- // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
+ // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
// r11: code
// r14: slice offset
// r15: original subject string
@@ -1639,14 +1423,12 @@ static int NegativeComparisonResult(Condition cc) {
}
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
+static void CheckInputType(MacroAssembler* masm, Register input,
+ CompareICState::State expected, Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ CompareMap(input, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, fail);
@@ -1671,14 +1453,14 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
}
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
Condition cc = GetCondition();
Factory* factory = isolate()->factory();
Label miss;
- CheckInputType(masm, rdx, left_, &miss);
- CheckInputType(masm, rax, right_, &miss);
+ CheckInputType(masm, rdx, left(), &miss);
+ CheckInputType(masm, rax, right(), &miss);
// Compare two smis.
Label non_smi, smi_done;
@@ -1850,24 +1632,15 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
+ __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx,
+ &check_unequal_objects);
- // Inline comparison of ASCII strings.
+ // Inline comparison of one-byte strings.
if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- rdx,
- rax,
- rcx,
- rbx);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, rdx, rax, rcx, rbx);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx,
+ rdi, r8);
}
#ifdef DEBUG
@@ -1954,7 +1727,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// function without changing the state.
__ cmpp(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
__ j(equal, &done);
if (!FLAG_pretenuring_call_new) {
@@ -1978,13 +1751,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ TypeFeedbackVector::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
@@ -2163,7 +1936,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -2245,7 +2018,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// rdi - function
// rdx - slot id (as integer)
Label miss;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, rbx);
@@ -2269,7 +2042,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -2284,13 +2057,12 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// rdi - function
- // rbx - vector
// rdx - slot id
Isolate* isolate = masm->isolate();
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
StackArgumentsAccessor args(rsp, argc);
ParameterCount actual(argc);
@@ -2303,7 +2075,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &extra_checks_or_miss);
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Load the receiver from the stack.
@@ -2322,7 +2094,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(isolate, masm, &args, argc, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, &args, &cont);
}
@@ -2332,9 +2104,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
FixedArray::kHeaderSize));
- __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
__ j(equal, &slow_start);
- __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
__ j(equal, &miss);
if (!FLAG_trace_ic) {
@@ -2343,15 +2115,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AssertNotSmi(rcx);
__ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &miss);
- __ Move(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize),
- TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackVector::MegamorphicSentinel(isolate));
__ jmp(&slow_start);
}
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -2367,9 +2138,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Get the receiver of the function from the stack; 1 ~ return address.
- __ movp(rcx, Operand(rsp, (state_.arg_count() + 1) * kPointerSize));
+ __ movp(rcx, Operand(rsp, (arg_count() + 1) * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2382,6 +2153,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ Push(rdx);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -2432,11 +2206,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
+ int arg_stack_space = (result_size() < 2 ? 2 : 4);
+#else // _WIN64
int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
+#endif // _WIN64
+ __ EnterExitFrame(arg_stack_space, save_doubles());
// rbx: pointer to builtin function (C callee-saved).
// rbp: frame pointer of exit frame (restored after C call).
@@ -2458,14 +2232,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
// Pass argv and argc as two parameters. The arguments object will
// be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
- if (result_size_ < 2) {
+ if (result_size() < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
__ movp(rcx, r14); // argc.
__ movp(rdx, r15); // argv.
__ Move(r8, ExternalReference::isolate_address(isolate()));
} else {
- DCHECK_EQ(2, result_size_);
+ DCHECK_EQ(2, result_size());
// Pass a pointer to the result location as the first argument.
__ leap(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
@@ -2479,21 +2253,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ movp(rdi, r14); // argc.
__ movp(rsi, r15); // argv.
__ Move(rdx, ExternalReference::isolate_address(isolate()));
-#endif
+#endif // _WIN64
__ call(rbx);
// Result is in rax - do not destroy this register!
#ifdef _WIN64
// If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- DCHECK_EQ(2, result_size_);
+ if (result_size() > 1) {
+ DCHECK_EQ(2, result_size());
// Read result values stored on stack. Result is stored
// above the four argument mirror slots and the two
// Arguments object slots.
__ movq(rax, Operand(rsp, 6 * kRegisterSize));
__ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
-#endif
+#endif // _WIN64
// Runtime functions should not return 'the hole'. Allowing it to escape may
// lead to crashes in the IC code later.
@@ -2527,7 +2301,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_);
+ __ LeaveExitFrame(save_doubles());
__ ret(0);
// Handling of exception.
@@ -2556,7 +2330,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
@@ -2569,7 +2343,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ movp(rbp, rsp);
// Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
// Scratch register is neither callee-save, nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
@@ -2659,7 +2433,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// external reference instead of inlining the call target address directly
// in the code, because the builtin stubs may not have been generated yet
// at the time this code is generated.
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
__ Load(rax, construct_entry);
@@ -2932,22 +2706,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
-// Passing arguments in registers is not supported.
-Register InstanceofStub::left() { return rax; }
-
-
-Register InstanceofStub::right() { return rdx; }
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
// If the receiver is a smi trigger the non-string case.
__ JumpIfSmi(object_, receiver_not_string_);
@@ -3101,65 +2863,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
- __ SmiToInteger32(scratch, scratch);
- __ addl(scratch, character);
- __ movl(hash, scratch);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ addl(hash, character);
- // hash += hash << 10;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ leal(hash, Operand(hash, hash, times_8, 0));
- // hash ^= hash >> 11;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(11));
- __ xorl(hash, scratch);
- // hash += hash << 15;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(15));
- __ addl(hash, scratch);
-
- __ andl(hash, Immediate(String::kHashBitMask));
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero);
- __ Set(hash, StringHasher::kZeroHash);
- __ bind(&hash_not_zero);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -3275,7 +2978,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
+ __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
__ jmp(&set_slice_header, Label::kNear);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
@@ -3320,7 +3023,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(zero, &two_byte_sequential);
// Allocate the result.
- __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
+ __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
// rax: result string
// rcx: result string length
@@ -3381,11 +3084,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
Register length = scratch1;
// Compare lengths.
@@ -3408,8 +3111,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
Label strings_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
// Characters are equal.
__ Move(rax, Smi::FromInt(EQUAL));
@@ -3422,13 +3125,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
// Ensure that you can always subtract a string length from a non-negative
// number (e.g. another length).
STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
@@ -3458,11 +3157,11 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare loop.
Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal,
- // In debug-code mode, SmiTest below might push
- // the target label outside the near range.
- Label::kFar);
+ GenerateOneByteCharsCompareLoop(
+ masm, left, right, min_length, scratch2, &result_not_equal,
+ // In debug-code mode, SmiTest below might push
+ // the target label outside the near range.
+ Label::kFar);
// Completed loop without finding different characters.
// Compare lengths (precomputed).
@@ -3496,14 +3195,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance near_jump) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch, Label* chars_not_equal, Label::Distance near_jump) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
@@ -3549,16 +3243,17 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_same);
- // Check that both are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+ // Check that both are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
- // Inline comparison of ASCII strings.
+ // Inline comparison of one-byte strings.
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ PopReturnAddressTo(rcx);
__ addp(rsp, Immediate(2 * kPointerSize));
__ PushReturnAddressFrom(rcx);
- GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
+ r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -3590,13 +3285,13 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
@@ -3619,17 +3314,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(rdx, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(rax, &miss);
}
@@ -3671,12 +3366,12 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ Cmp(rax, isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
__ JumpIfSmi(rdx, &unordered);
@@ -3686,7 +3381,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ Cmp(rdx, isolate()->factory()->undefined_value());
__ j(equal, &unordered);
}
@@ -3696,8 +3391,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
DCHECK(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -3739,8 +3434,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
DCHECK(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -3761,8 +3456,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
- __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
@@ -3782,11 +3477,11 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = rdx;
@@ -3839,16 +3534,16 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&do_compare);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+ __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+ tmp2);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
+ StringHelper::GenerateCompareFlatOneByteStrings(
masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
}
@@ -3869,8 +3564,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
@@ -3889,7 +3584,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
@@ -3909,7 +3604,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
@@ -3920,7 +3615,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rax);
__ Push(rdx);
__ Push(rax);
- __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(op()));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
@@ -3981,8 +3676,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Check if the entry name is not a unique name.
__ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- miss);
+ __ JumpIfNotUniqueNameInstanceType(
+ FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
__ bind(&good);
}
@@ -4068,9 +3763,9 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- Register scratch = result_;
+ Register scratch = result();
- __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ SmiToInteger32(scratch, FieldOperand(dictionary(), kCapacityOffset));
__ decl(scratch);
__ Push(scratch);
@@ -4091,12 +3786,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
DCHECK(NameDictionary::kEntrySize == 3);
- __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ leap(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- __ movp(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
+ __ movp(scratch, Operand(dictionary(), index(), times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ Cmp(scratch, isolate()->factory()->undefined_value());
@@ -4106,15 +3799,16 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// If we hit a key that is not a unique name during negative
// lookup we have to bailout as this key might be equal to the
// key we are looking for.
// Check if the entry name is not a unique name.
__ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
- &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(
+ FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
@@ -4122,7 +3816,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
@@ -4165,11 +3859,8 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ jmp(&skip_to_incremental_noncompacting, Label::kNear);
__ jmp(&skip_to_incremental_compacting, Label::kFar);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4191,7 +3882,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ movp(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -4211,10 +3902,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -4229,7 +3917,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
Register address =
arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
DCHECK(!address.is(regs_.object()));
@@ -4247,7 +3935,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -4280,10 +3968,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4325,10 +4010,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4430,14 +4112,27 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ movp(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ PopReturnAddressTo(rcx);
- int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
- ? kPointerSize
- : 0;
+ int additional_offset =
+ function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
__ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4640,7 +4335,7 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ testp(rax, rax);
__ j(not_zero, &not_zero_case);
@@ -4653,11 +4348,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -4818,9 +4513,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register return_address = rdi;
Register context = rsi;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -4932,7 +4627,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Register accessor_info_arg = rsi;
Register name_arg = rdi;
#endif
- Register api_function_address = r8;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(r8));
Register scratch = rax;
// v8::Arguments::values_ and handler for name.
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 71fc5aba59..d17fa1b5ff 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -5,31 +5,12 @@
#ifndef V8_X64_CODE_STUBS_X64_H_
#define V8_X64_CODE_STUBS_X64_H_
-#include "src/ic-inl.h"
-
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
- : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
class StringHelper : public AllStatic {
public:
@@ -42,71 +23,24 @@ class StringHelper : public AllStatic {
Register count,
String::Encoding encoding);
+ // Compares two flat one-byte strings and returns result in rax.
+ static void GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4);
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- // Compares two flat ASCII strings and returns result in rax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
+ // Compares two flat one-byte strings for equality and returns result in rax.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in rax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
+ Register scratch2);
private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch, Label* chars_not_equal,
Label::Distance near_jump = Label::kFar);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -114,18 +48,13 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Isolate* isolate,
- Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : PlatformCodeStub(isolate),
- dictionary_(dictionary),
- result_(result),
- index_(index),
- mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
+ Register result, Register index, LookupMode mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = DictionaryBits::encode(dictionary.code()) |
+ ResultBits::encode(result.code()) |
+ IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -156,46 +85,49 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
+ Register dictionary() const {
+ return Register::from_code(DictionaryBits::decode(minor_key_));
+ }
- int MinorKey() const {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
+ Register result() const {
+ return Register::from_code(ResultBits::decode(minor_key_));
}
+ Register index() const {
+ return Register::from_code(IndexBits::decode(minor_key_));
+ }
+
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
class DictionaryBits: public BitField<int, 0, 4> {};
class ResultBits: public BitField<int, 4, 4> {};
class IndexBits: public BitField<int, 8, 4> {};
class LookupModeBits: public BitField<LookupMode, 12, 1> {};
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
+ RecordWriteStub(Isolate* isolate, Register object, Register value,
+ Register address, RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -251,6 +183,8 @@ class RecordWriteStub: public PlatformCodeStub {
CpuFeatures::FlushICache(stub->instruction_start(), 7);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always rcx (needed for shift operations). The input is two registers
@@ -379,7 +313,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- void Generate(MacroAssembler* masm);
+ virtual Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -387,18 +323,28 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 4> {};
@@ -407,13 +353,10 @@ class RecordWriteStub: public PlatformCodeStub {
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 01cb512d02..44e1618c31 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -286,7 +286,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
+ __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
// r15: the-hole NaN
__ jmp(&entry);
@@ -393,7 +393,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
+ __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
@@ -522,7 +522,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ j(zero, &seq_string, Label::kNear);
// Handle external strings.
- Label ascii_external, done;
+ Label one_byte_external, done;
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
@@ -537,22 +537,22 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(result, Immediate(kStringEncodingMask));
__ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
+ __ j(not_equal, &one_byte_external, Label::kNear);
// Two-byte string.
__ movzxwl(result, Operand(result, index, times_2, 0));
__ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // Ascii string.
+ __ bind(&one_byte_external);
+ // One-byte string.
__ movzxbl(result, Operand(result, index, times_1, 0));
__ jmp(&done, Label::kNear);
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
+ // Dispatch on the encoding: one-byte or two-byte.
+ Label one_byte;
__ bind(&seq_string);
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
+ __ j(not_zero, &one_byte, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
@@ -563,9 +563,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
SeqTwoByteString::kHeaderSize));
__ jmp(&done, Label::kNear);
- // ASCII string.
+ // One-byte string.
// Load the byte into the result register.
- __ bind(&ascii);
+ __ bind(&one_byte);
__ movzxbl(result, FieldOperand(string,
index,
times_1,
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 8bfd7f4c58..0a551eef5c 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -6,7 +6,7 @@
#define V8_X64_CODEGEN_X64_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index fe78e37214..c8b7c2246a 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -162,17 +162,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-x64.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0, false);
}
@@ -186,9 +186,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC store call (from ic-x64.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0, false);
}
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index a2f9faa002..16b0cdcd1b 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -103,7 +103,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
int params = descriptor->GetHandlerParameterCount();
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 38b594c2bc..02c2d9c798 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_X64
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -1011,7 +1012,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1148,7 +1150,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// No need for a write barrier, we are storing a Smi in the feedback vector.
__ Move(rbx, FeedbackVector());
__ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
- TypeFeedbackInfo::MegamorphicSentinel(isolate()));
+ TypeFeedbackVector::MegamorphicSentinel(isolate()));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1289,9 +1291,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
@@ -1312,6 +1312,25 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ movp(LoadDescriptor::ReceiverRegister(),
+ Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ Cmp(rax, isolate()->factory()->undefined_value());
+ Label done;
+ __ j(not_equal, &done);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1363,10 +1382,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movp(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ Move(LoadIC::NameRegister(), proxy->var()->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(LoadDescriptor::NameRegister(), proxy->var()->name());
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(),
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
@@ -1449,10 +1468,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ Move(LoadIC::NameRegister(), var->name());
- __ movp(LoadIC::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(LoadDescriptor::NameRegister(), var->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(),
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
@@ -1667,9 +1686,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreIC::ValueRegister().is(rax));
- __ Move(StoreIC::NameRegister(), key->value());
- __ movp(StoreIC::ReceiverRegister(), Operand(rsp, 0));
+ DCHECK(StoreDescriptor::ValueRegister().is(rax));
+ __ Move(StoreDescriptor::NameRegister(), key->value());
+ __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1830,13 +1849,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1848,17 +1873,26 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0));
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
} else {
VisitForStackValue(property->obj());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ __ Push(MemOperand(rsp, kPointerSize));
+ __ Push(result_register());
+ }
+ break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, kPointerSize));
- __ movp(LoadIC::NameRegister(), Operand(rsp, 0));
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
+ __ movp(LoadDescriptor::NameRegister(), Operand(rsp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1880,6 +1914,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1925,6 +1963,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1939,12 +1980,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ Push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
@@ -1978,7 +2019,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ Move(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
@@ -1990,7 +2031,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -1999,8 +2040,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -2057,10 +2098,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ movp(load_receiver, Operand(rsp, kPointerSize));
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(),
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
@@ -2076,7 +2117,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(expr->DoneFeedbackSlot()));
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
+ Smi::FromInt(expr->DoneFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // rax=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
@@ -2088,7 +2130,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(),
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->ValueFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
@@ -2251,9 +2293,12 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ Move(LoadIC::NameRegister(), key->value());
+ DCHECK(!prop->IsSuperAccess());
+
+ __ Move(LoadDescriptor::NameRegister(), key->value());
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(prop->PropertyFeedbackSlot()));
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
+ Smi::FromInt(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
@@ -2261,11 +2306,24 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(prop->PropertyFeedbackSlot()));
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
+ Smi::FromInt(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
CallIC(ic, prop->PropertyFeedbackId());
@@ -2290,8 +2348,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movp(rax, rcx);
- BinaryOpICStub stub(isolate(), op, mode);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2338,9 +2396,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ Pop(rdx);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -2370,9 +2428,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ Push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ Move(StoreIC::ReceiverRegister(), rax);
- __ Pop(StoreIC::ValueRegister()); // Restore value.
- __ Move(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
+ __ Move(StoreDescriptor::ReceiverRegister(), rax);
+ __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ Move(StoreDescriptor::NameRegister(),
+ prop->key()->AsLiteral()->value());
CallStoreIC();
break;
}
@@ -2380,12 +2439,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(KeyedStoreIC::NameRegister(), rax);
- __ Pop(KeyedStoreIC::ReceiverRegister());
- __ Pop(KeyedStoreIC::ValueRegister()); // Restore value.
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Move(StoreDescriptor::NameRegister(), rax);
+ __ Pop(StoreDescriptor::ReceiverRegister());
+ __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2409,8 +2467,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ Move(StoreIC::NameRegister(), var->name());
- __ movp(StoreIC::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(StoreDescriptor::NameRegister(), var->name());
+ __ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2480,8 +2538,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Move(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
- __ Pop(StoreIC::ReceiverRegister());
+ __ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
+ __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2489,17 +2547,33 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // rax : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(rax);
+ __ Push(key->value());
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ Pop(KeyedStoreIC::NameRegister()); // Key.
- __ Pop(KeyedStoreIC::ReceiverRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(rax));
+ __ Pop(StoreDescriptor::NameRegister()); // Key.
+ __ Pop(StoreDescriptor::ReceiverRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(rax));
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2512,17 +2586,24 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- DCHECK(!rax.is(LoadIC::ReceiverRegister()));
- __ movp(LoadIC::ReceiverRegister(), rax);
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ DCHECK(!rax.is(LoadDescriptor::ReceiverRegister()));
+ __ movp(LoadDescriptor::ReceiverRegister(), rax);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(rax);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadIC::NameRegister(), rax);
- __ Pop(LoadIC::ReceiverRegister());
+ __ Move(LoadDescriptor::NameRegister(), rax);
+ __ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(rax);
}
@@ -2540,11 +2621,10 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2555,7 +2635,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0));
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2567,6 +2648,43 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(rax);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(rax);
+ __ Push(rax);
+ __ Push(Operand(rsp, kPointerSize * 2));
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Common code for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2577,8 +2695,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0));
- __ Move(LoadIC::NameRegister(), rax);
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
+ __ Move(LoadDescriptor::NameRegister(), rax);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2586,11 +2704,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2728,13 +2846,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+ // super.x() is handled in EmitCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
} else {
DCHECK(call_type == Call::OTHER_CALL);
@@ -3231,7 +3356,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, isolate()->factory()->function_class_string());
+ __ Move(rax, isolate()->factory()->Function_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3351,9 +3476,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = rbx;
Register value = rcx;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value);
__ Pop(index);
@@ -3384,9 +3509,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = rbx;
Register value = rcx;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value);
__ Pop(index);
@@ -3742,7 +3867,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, return_result, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
@@ -3803,7 +3928,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array = no_reg;
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ Set(index, 0);
__ Set(string_length, 0);
@@ -3812,7 +3937,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
__ cmpp(index, array_length);
- __ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(below, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ bind(&loop);
__ movp(string, FieldOperand(elements,
@@ -3856,7 +3981,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements: FixedArray of strings.
// index: Array length.
- // Check that the separator is a sequential ASCII string.
+ // Check that the separator is a sequential one-byte string.
__ movp(string, separator_operand);
__ JumpIfSmi(string, &bailout);
__ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
@@ -3884,8 +4009,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live registers and stack values:
// string_length: Total length of result string.
// elements: FixedArray of strings.
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
+ __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
+ &bailout);
__ movp(result_operand, result_pos);
__ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
@@ -3932,7 +4057,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
- // Get the separator ASCII character value.
+ // Get the separator one-byte character value.
// Register "string" holds the separator.
__ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ Set(index, 0);
@@ -4056,10 +4181,10 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
// Load the function from the receiver.
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0));
- __ Move(LoadIC::NameRegister(), expr->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
+ __ Move(LoadDescriptor::NameRegister(), expr->name());
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(),
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -4228,6 +4353,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -4242,15 +4372,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
if (assign_type == NAMED_PROPERTY) {
VisitForStackValue(prop->obj());
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0));
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
// Leave receiver on stack
- __ movp(LoadIC::ReceiverRegister(), Operand(rsp, kPointerSize));
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
// Copy of key, needed for later store.
- __ movp(LoadIC::NameRegister(), Operand(rsp, 0));
+ __ movp(LoadDescriptor::NameRegister(), Operand(rsp, 0));
EmitKeyedPropertyLoad(prop);
}
}
@@ -4332,8 +4462,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+ NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4362,8 +4493,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Move(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
- __ Pop(StoreIC::ReceiverRegister());
+ __ Move(StoreDescriptor::NameRegister(),
+ prop->key()->AsLiteral()->value());
+ __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4376,11 +4508,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(KeyedStoreIC::NameRegister());
- __ Pop(KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Pop(StoreDescriptor::NameRegister());
+ __ Pop(StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4403,10 +4534,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ Move(LoadIC::NameRegister(), proxy->name());
- __ movp(LoadIC::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(LoadDescriptor::NameRegister(), proxy->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_ics) {
- __ Move(LoadIC::SlotRegister(),
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4568,7 +4699,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
new file mode 100644
index 0000000000..84fdca4fb9
--- /dev/null
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -0,0 +1,305 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return rdx; }
+const Register LoadDescriptor::NameRegister() { return rcx; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return rax; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return rbx; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return rdx; }
+const Register StoreDescriptor::NameRegister() { return rcx; }
+const Register StoreDescriptor::ValueRegister() { return rax; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() {
+ return rbx;
+}
+
+
+const Register InstanceofDescriptor::left() { return rax; }
+const Register InstanceofDescriptor::right() { return rdx; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return rdx; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return rax; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r8; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return rdx; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rbx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // ToNumberStub invokes a function, and therefore needs a context.
+ Register registers[] = {rsi, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rax, rbx, rcx};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rax, rbx, rcx, rdx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rbx, rdx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rcx, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdi, rdx};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // rdi : constructor function
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {rsi, rax, rdi, rbx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rcx, rbx, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rax, rbx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // rax -- number of arguments
+ // rdi -- function
+ // rbx -- allocation site with elements kind
+ Register registers[] = {rsi, rdi, rbx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {rsi, rdi, rbx, rax};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // rsi -- context
+ // rax -- number of arguments
+ // rdi -- constructor function
+ Register registers[] = {rsi, rdi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {rsi, rdi, rax};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdx, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rcx, rdx, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdx, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rsi, // context
+ rcx, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rsi, // context
+ rcx, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rsi, // context
+ rdx, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rsi, // context
+ rdi, // JSFunction
+ rax, // actual number of arguments
+ rbx, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rsi, // context
+ rax, // callee
+ rbx, // call_data
+ rcx, // holder
+ rdx, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 4457c20297..1981d55f79 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -6,9 +6,12 @@
#if V8_TARGET_ARCH_X64
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
@@ -17,7 +20,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -27,9 +30,9 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -300,16 +303,11 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; -------------------- Jump table --------------------");
}
for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
if (needs_frame.is_bound()) {
@@ -726,9 +724,10 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
@@ -771,22 +770,22 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&done);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cc == no_condition && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().bailout_type != bailout_type) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
@@ -798,12 +797,12 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, detail, bailout_type);
}
@@ -811,7 +810,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -1034,7 +1033,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
@@ -1051,7 +1050,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1066,7 +1065,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -1088,7 +1087,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1099,7 +1098,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
@@ -1119,7 +1118,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1145,13 +1144,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
@@ -1178,7 +1177,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1186,7 +1185,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1233,7 +1232,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1242,7 +1241,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1252,7 +1251,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@@ -1274,26 +1273,26 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1314,7 +1313,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1322,7 +1321,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1332,7 +1331,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
@@ -1352,7 +1351,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1361,7 +1360,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1371,7 +1370,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@@ -1382,7 +1381,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
@@ -1459,7 +1458,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1478,10 +1477,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1489,7 +1488,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1497,7 +1496,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
@@ -1610,7 +1609,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case Token::SHL:
@@ -1639,7 +1638,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case Token::SHL:
@@ -1654,7 +1653,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@@ -1697,7 +1696,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
@@ -1721,7 +1720,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
if (int_val == 0) {
@@ -1762,9 +1761,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(rax));
Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1928,7 +1927,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
@@ -2057,8 +2056,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(rax));
DCHECK(ToRegister(instr->result()).is(rax));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2172,7 +2172,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
}
const Register map = kScratchRegister;
@@ -2226,7 +2226,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
@@ -2500,7 +2500,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2578,7 +2578,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2609,7 +2609,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2671,15 +2671,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2784,7 +2784,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2842,27 +2842,36 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Move(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
+ Smi::FromInt(instr->hydrogen()->slot()));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Move(LoadIC::NameRegister(), instr->name());
+ __ Move(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(rax));
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2881,7 +2890,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
DCHECK(!value.is(cell));
__ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
// Store the value.
__ movp(Operand(cell, 0), value);
} else {
@@ -2900,7 +2909,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2921,7 +2930,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment);
}
@@ -2997,19 +3006,14 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Move(LoadIC::NameRegister(), instr->name());
+ __ Move(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(rax));
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3024,7 +3028,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3136,7 +3140,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3175,7 +3179,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -3223,21 +3227,19 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
}
__ Load(result,
- BuildFastArrayOperand(instr->elements(),
- key,
+ BuildFastArrayOperand(instr->elements(), key,
instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- offset),
+ FAST_ELEMENTS, offset),
representation);
// Check for the hole value.
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
+ DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
@@ -3286,19 +3288,14 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(rax));
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3391,9 +3388,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr->environment());
+ DeoptimizeIf(is_smi, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3420,7 +3417,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "too many arguments");
__ Push(receiver);
__ movp(receiver, length);
@@ -3532,6 +3529,30 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+ Register scratch = rbx;
+ DCHECK(!scratch.is(receiver) && !scratch.is(name));
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, no_reg);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ leave();
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
@@ -3591,7 +3612,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
@@ -3637,7 +3658,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
@@ -3648,21 +3669,21 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3703,18 +3724,18 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "minus zero");
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr, "NaN");
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3723,7 +3744,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
@@ -3733,7 +3754,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3744,7 +3765,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ bind(&done);
}
@@ -3771,8 +3792,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3788,8 +3808,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3804,8 +3823,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
- __ RecordComment("Minus zero");
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "minus zero");
}
__ Set(output_reg, 0);
__ bind(&done);
@@ -3869,9 +3887,9 @@ void LCodeGen::DoPower(LPower* instr) {
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- Register exponent = rdx;
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(exponent));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(xmm1));
DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
@@ -3882,9 +3900,9 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
- __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr->environment());
+ __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
+ __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -4191,10 +4209,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ Move(StoreIC::NameRegister(), instr->hydrogen()->name());
+ __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4256,7 +4274,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
@@ -4348,8 +4366,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ ucomisd(value, value);
__ j(parity_odd, &have_value, Label::kNear); // NaN.
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ Set(kScratchRegister,
+ bit_cast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
__ movq(value, kScratchRegister);
__ bind(&have_value);
@@ -4456,13 +4475,12 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4504,7 +4522,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
@@ -4521,14 +4539,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4576,14 +4594,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4648,15 +4666,15 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4678,15 +4696,15 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4765,14 +4783,14 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4823,12 +4841,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
@@ -4838,7 +4856,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
} else {
__ AssertSmi(input);
}
@@ -4846,12 +4864,12 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ XMMRegister result_reg, NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4869,7 +4887,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
}
if (deoptimize_on_minus_zero) {
@@ -4879,7 +4897,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
@@ -4888,7 +4906,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
@@ -4935,32 +4953,40 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Set(input_reg, 0);
- __ jmp(done);
} else {
- Label bailout;
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ TaggedToI(input_reg, input_reg, xmm_temp,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
- __ jmp(done);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ XMMRegister scratch = ToDoubleRegister(instr->temp());
+ DCHECK(!scratch.is(xmm0));
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2si(input_reg, xmm0);
+ __ Cvtlsi2sd(scratch, input_reg);
+ __ ucomisd(xmm0, scratch);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
+ if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+ __ testl(input_reg, input_reg);
+ __ j(not_zero, done);
+ __ movmskpd(input_reg, xmm0);
+ __ andl(input_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr, "minus zero");
+ }
}
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_, done());
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4994,11 +5020,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
@@ -5014,14 +5036,19 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (instr->truncating()) {
__ TruncateDoubleToI(result_reg, input_reg);
} else {
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
XMMRegister xmm_scratch = double_scratch0();
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+ &is_nan, &minus_zero, dist);
+ __ jmp(&done, dist);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
@@ -5036,25 +5063,29 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
XMMRegister xmm_scratch = double_scratch0();
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+ &minus_zero, dist);
+ __ jmp(&done, dist);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
-
__ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr->environment());
+ DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
}
@@ -5062,7 +5093,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "Smi");
}
}
@@ -5082,14 +5113,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
@@ -5097,17 +5128,17 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
@@ -5116,7 +5147,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
@@ -5131,22 +5162,22 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5184,7 +5215,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
@@ -5223,7 +5254,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
@@ -5267,14 +5298,14 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5434,9 +5465,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -5615,9 +5645,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
-
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}
@@ -5643,14 +5671,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5707,19 +5735,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "undefined");
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpp(rax, null_value);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "null");
Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -5734,7 +5762,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
@@ -5756,7 +5784,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "no cache");
}
@@ -5764,7 +5792,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
@@ -5783,7 +5811,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5794,10 +5822,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register object_;
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index b3070c0189..ccd90b53c6 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -143,8 +143,8 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
- void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@@ -206,10 +206,9 @@ class LCodeGen: public LCodeGenBase {
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -253,7 +252,7 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -263,13 +262,8 @@ class LCodeGen: public LCodeGenBase {
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagD(
- Register input,
- XMMRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ XMMRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -307,13 +301,17 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
@@ -341,7 +339,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index bfc2ec0e7d..d10e1a1e9e 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -188,7 +188,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
if (int_val == 0) {
__ xorps(dst, dst);
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/x64/lithium-gap-resolver-x64.h
index fd4b91ab34..695b3526ed 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.h
@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 0575166fa4..69f50b1bee 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -449,12 +449,6 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -609,9 +603,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
+ instr->set_environment(CreateEnvironment(
+ hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
return instr;
}
@@ -1103,14 +1096,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1120,6 +1113,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
@@ -1623,8 +1629,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) : UseFixed(instr->right(), rdx);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), xmm1)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
@@ -2058,11 +2066,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2131,10 +2139,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
@@ -2222,11 +2231,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
@@ -2303,10 +2313,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object = UseFixed(instr->object(),
- KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2382,8 +2392,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseFixed(instr->value(), rax);
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (instr->field_representation().IsSmi()) {
- val = UseRegister(instr->value());
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
@@ -2401,8 +2409,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LStoreNamedGeneric* result =
new(zone()) LStoreNamedGeneric(context, object, value);
@@ -2479,10 +2488,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2498,7 +2507,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
}
@@ -2605,6 +2614,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index a1c563f882..30b994ee77 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -151,6 +151,7 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -163,11 +164,11 @@ class LCodeGen;
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -291,14 +292,14 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
virtual bool MustSignExtendResult(
- LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+ LPlatformChunk* chunk) const FINAL OVERRIDE;
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -316,11 +317,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
@@ -335,8 +336,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const FINAL OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -373,11 +374,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -385,14 +386,14 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@@ -401,7 +402,7 @@ class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -417,14 +418,14 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -433,25 +434,25 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -467,16 +468,16 @@ class LLabel V8_FINAL : public LGap {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -489,9 +490,30 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -503,7 +525,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -542,7 +564,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -557,7 +579,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -578,7 +600,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -592,11 +614,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -608,14 +630,14 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -633,7 +655,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LModByConstI(LOperand* dividend,
int32_t divisor,
@@ -658,7 +680,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -675,7 +697,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -693,7 +715,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LDivByConstI(LOperand* dividend,
int32_t divisor,
@@ -718,7 +740,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -735,7 +757,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -754,7 +776,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LFlooringDivByConstI(LOperand* dividend,
int32_t divisor,
@@ -782,7 +804,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -799,7 +821,7 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -814,7 +836,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -833,11 +855,11 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -850,7 +872,7 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -865,7 +887,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
@@ -875,7 +897,7 @@ class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -890,7 +912,7 @@ class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -902,7 +924,7 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
@@ -914,7 +936,7 @@ class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -931,7 +953,7 @@ class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -943,7 +965,7 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
@@ -955,7 +977,7 @@ class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -969,7 +991,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -982,7 +1004,7 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCompareMinusZeroAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -997,7 +1019,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 0> {
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1008,11 +1030,11 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1025,11 +1047,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1040,11 +1062,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1058,11 +1080,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
explicit LStringCompareAndBranch(LOperand* context,
LOperand* left,
@@ -1080,13 +1102,13 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1098,11 +1120,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1115,7 +1137,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1128,11 +1150,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1148,11 +1170,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1171,7 +1193,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1187,7 +1209,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1208,7 +1230,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1217,7 +1239,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1232,7 +1254,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1252,7 +1274,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1273,7 +1295,7 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1288,7 +1310,7 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1297,7 +1319,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1306,7 +1328,7 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1321,7 +1343,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1332,7 +1354,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1343,7 +1365,7 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1354,17 +1376,17 @@ class LBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1379,7 +1401,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1391,7 +1413,7 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1408,7 +1430,7 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -1423,7 +1445,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -1444,7 +1466,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1464,7 +1486,7 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1479,7 +1501,7 @@ class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1494,7 +1516,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1506,18 +1528,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1534,18 +1556,18 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value,
LOperand* context,
@@ -1572,7 +1594,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1585,7 +1607,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
explicit LLoadNamedGeneric(LOperand* context, LOperand* object,
LOperand* vector) {
@@ -1605,7 +1627,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1618,7 +1640,7 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1643,7 +1665,7 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1664,7 +1686,7 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
@@ -1672,7 +1694,7 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
LOperand* vector) {
@@ -1692,14 +1714,14 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1720,7 +1742,7 @@ class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1735,7 +1757,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1748,11 +1770,11 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1769,11 +1791,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1785,7 +1807,7 @@ class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1798,7 +1820,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
@@ -1815,7 +1837,7 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1825,27 +1847,27 @@ class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1858,7 +1880,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -1869,44 +1891,44 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1919,13 +1941,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1941,7 +1963,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1954,13 +1976,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1973,13 +1995,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -1990,7 +2012,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2000,7 +2022,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2012,7 +2034,7 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2024,7 +2046,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2040,7 +2062,7 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -2056,7 +2078,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2072,7 +2094,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -2087,7 +2109,7 @@ class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2101,7 +2123,7 @@ class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2118,7 +2140,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2131,7 +2153,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2144,7 +2166,7 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2161,7 +2183,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2176,7 +2198,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@@ -2184,7 +2206,7 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2199,14 +2221,14 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2229,13 +2251,13 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
@@ -2255,13 +2277,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2282,7 +2304,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2293,7 +2315,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2309,7 +2331,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2326,7 +2348,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2343,7 +2365,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2358,7 +2380,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -2371,7 +2393,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2384,7 +2406,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
@@ -2397,7 +2419,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2409,7 +2431,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2421,7 +2443,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2433,7 +2455,7 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped,
LOperand* temp_xmm) {
@@ -2448,7 +2470,7 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2461,7 +2483,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -2474,7 +2496,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -2488,7 +2510,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
@@ -2505,7 +2527,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2518,7 +2540,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2531,7 +2553,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2544,7 +2566,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2558,7 +2580,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2571,11 +2593,11 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2589,18 +2611,18 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2618,7 +2640,7 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2632,7 +2654,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2648,7 +2670,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2662,7 +2684,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2706,7 +2728,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph),
@@ -2724,20 +2746,14 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator) { }
-
- Isolate* isolate() const { return graph_->isolate(); }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2767,24 +2783,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
@@ -2829,7 +2827,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2876,10 +2874,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBinaryOperation* instr);
void FindDehoistedKeyDefinitions(HValue* candidate);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 7a37fb3e3a..5033303402 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_X64
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
@@ -235,8 +237,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate(), save_fp);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
@@ -545,7 +546,7 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
@@ -2610,13 +2611,9 @@ void MacroAssembler::JumpIfNotString(Register object,
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
+ Register first_object, Register second_object, Register scratch1,
+ Register scratch2, Label* on_fail, Label::Distance near_jump) {
// Check that both objects are not smis.
Condition either_smi = CheckEitherSmi(first_object, second_object);
j(either_smi, on_fail, near_jump);
@@ -2627,67 +2624,62 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ASCII strings.
+ // Check that both are flat one-byte strings.
DCHECK(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
+ andl(scratch1, Immediate(kFlatOneByteStringMask));
+ andl(scratch2, Immediate(kFlatOneByteStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
j(not_equal, on_fail, near_jump);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure,
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch, Label* failure,
Label::Distance near_jump) {
if (!scratch.is(instance_type)) {
movl(scratch, instance_type);
}
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- andl(scratch, Immediate(kFlatAsciiStringMask));
+ andl(scratch, Immediate(kFlatOneByteStringMask));
cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
j(not_equal, failure, near_jump);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* on_fail,
Label::Distance near_jump) {
// Load instance type for both strings.
movp(scratch1, first_object_instance_type);
movp(scratch2, second_object_instance_type);
- // Check that both are flat ASCII strings.
+ // Check that both are flat one-byte strings.
DCHECK(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
+ andl(scratch1, Immediate(kFlatOneByteStringMask));
+ andl(scratch2, Immediate(kFlatOneByteStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
j(not_equal, on_fail, near_jump);
}
@@ -2709,16 +2701,16 @@ static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
}
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
- Label* not_unique_name,
- Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
}
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
- Label* not_unique_name,
- Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name,
+ Label::Distance distance) {
JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
}
@@ -3394,8 +3386,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&is_nan);
// Convert all NaNs to the same canonical NaN value when they are stored in
// the double array.
- Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ Set(kScratchRegister,
+ bit_cast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
movq(xmm_scratch, kScratchRegister);
jmp(&have_double_value, Label::kNear);
@@ -3524,17 +3517,16 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
}
-void MacroAssembler::DoubleToI(Register result_reg,
- XMMRegister input_reg,
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
XMMRegister scratch,
MinusZeroMode minus_zero_mode,
- Label* conversion_failed,
- Label::Distance dst) {
+ Label* lost_precision, Label* is_nan,
+ Label* minus_zero, Label::Distance dst) {
cvttsd2si(result_reg, input_reg);
Cvtlsi2sd(xmm0, result_reg);
ucomisd(xmm0, input_reg);
- j(not_equal, conversion_failed, dst);
- j(parity_even, conversion_failed, dst); // NaN.
+ j(not_equal, lost_precision, dst);
+ j(parity_even, is_nan, dst); // NaN.
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
Label done;
// The integer converted back is equal to the original. We
@@ -3544,47 +3536,14 @@ void MacroAssembler::DoubleToI(Register result_reg,
movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
- // jump to conversion_failed.
+ // jump to minus_zero.
andl(result_reg, Immediate(1));
- j(not_zero, conversion_failed, dst);
+ j(not_zero, minus_zero, dst);
bind(&done);
}
}
-void MacroAssembler::TaggedToI(Register result_reg,
- Register input_reg,
- XMMRegister temp,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision,
- Label::Distance dst) {
- Label done;
- DCHECK(!temp.is(xmm0));
-
- // Heap number map check.
- CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- j(not_equal, lost_precision, dst);
-
- movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- cvttsd2si(result_reg, xmm0);
- Cvtlsi2sd(temp, result_reg);
- ucomisd(xmm0, temp);
- RecordComment("Deferred TaggedToI: lost precision");
- j(not_equal, lost_precision, dst);
- RecordComment("Deferred TaggedToI: NaN");
- j(parity_even, lost_precision, dst); // NaN.
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- testl(result_reg, result_reg);
- j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, xmm0);
- andl(result_reg, Immediate(1));
- j(not_zero, lost_precision, dst);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -4102,7 +4061,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- DCHECK(IsPowerOf2(kFrameAlignment));
+ DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
DCHECK(is_int8(kFrameAlignment));
andp(rsp, Immediate(-kFrameAlignment));
}
@@ -4650,12 +4609,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
@@ -4668,7 +4625,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
subp(scratch1, Immediate(kHeaderAlignment));
}
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
@@ -4679,7 +4636,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
+ LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movp(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -4702,10 +4659,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -4714,7 +4671,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
+ LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4733,16 +4690,16 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
+ LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4991,7 +4948,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
// Make stack end at alignment and allocate space for arguments and old rsp.
movp(kScratchRegister, rsp);
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
@@ -5251,12 +5208,12 @@ void MacroAssembler::EnsureNotWhite(
jmp(&is_data_object, Label::kNear);
bind(&not_external);
- // Sequential string, either ASCII or UC16.
+ // Sequential string, either Latin1 or UC16.
DCHECK(kOneByteStringTag == 0x04);
andp(length, Immediate(kStringEncodingMask));
xorp(length, Immediate(kStringEncodingMask));
addp(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+ // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
imulp(length, FieldOperand(value, String::kLengthOffset));
shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
@@ -5368,12 +5325,14 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(rax));
DCHECK(!dividend.is(rdx));
- MultiplierAndShift ms(divisor);
- movl(rax, Immediate(ms.multiplier()));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ movl(rax, Immediate(mag.multiplier));
imull(dividend);
- if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
- if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
- if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) addl(rdx, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
+ if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
movl(rax, dividend);
shrl(rax, Immediate(31));
addl(rdx, rax);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 2ab05cf1ac..d051773b40 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -6,6 +6,7 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "src/assembler.h"
+#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
@@ -773,29 +774,22 @@ class MacroAssembler: public Assembler {
Label::Distance near_jump = Label::kFar);
- void JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_not_both_flat_ascii,
+ void JumpIfNotBothSequentialOneByteStrings(
+ Register first_object, Register second_object, Register scratch1,
+ Register scratch2, Label* on_not_both_flat_one_byte,
Label::Distance near_jump = Label::kFar);
- // Check whether the instance type represents a flat ASCII string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label*on_not_flat_ascii_string,
+ // Check whether the instance type represents a flat one-byte string. Jump
+ // to the label if not. If the instance type can be scratched specify same
+ // register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch,
+ Label* on_not_flat_one_byte_string,
Label::Distance near_jump = Label::kFar);
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* on_fail,
Label::Distance near_jump = Label::kFar);
void EmitSeqStringSetCharCheck(Register string,
@@ -804,10 +798,10 @@ class MacroAssembler: public Assembler {
uint32_t encoding_mask);
// Checks if the given register or operand is a unique name
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
- void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+ void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -1037,12 +1031,9 @@ class MacroAssembler: public Assembler {
void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, MinusZeroMode minus_zero_mode,
- Label* conversion_failed, Label::Distance dst = Label::kFar);
-
- void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
- MinusZeroMode minus_zero_mode, Label* lost_precision,
- Label::Distance dst = Label::kFar);
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* lost_precision, Label* is_nan, Label* minus_zero,
+ Label::Distance dst = Label::kFar);
void LoadUint32(XMMRegister dst, Register src);
@@ -1206,12 +1197,9 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
@@ -1219,10 +1207,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
@@ -1230,10 +1216,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
// ---------------------------------------------------------------------------
// Support functions.
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 731089a508..82a3735d77 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -22,7 +22,7 @@ namespace internal {
/*
* This assembler uses the following register assignment convention
- * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded
+ * - rdx : Currently loaded character(s) as Latin1 or UC16. Must be loaded
* using LoadCurrentCharacter before using any of the dispatch methods.
* Temporarily stores the index of capture start after a matching pass
* for a global regexp.
@@ -244,7 +244,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ addl(rax, rbx);
BranchOrBacktrack(greater, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label loop_increment;
if (on_no_match == NULL) {
on_no_match = &backtrack_label_;
@@ -400,7 +400,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ movzxbl(rax, Operand(rdx, 0));
__ cmpb(rax, Operand(rbx, 0));
} else {
@@ -498,7 +498,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Label* on_bit_set) {
__ Move(rax, table);
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
__ andp(rbx, Immediate(kTableMask));
index = rbx;
@@ -518,7 +518,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmpl(current_character(), Immediate(' '));
@@ -574,7 +574,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
Label done;
@@ -590,8 +590,8 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmpl(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
@@ -604,8 +604,8 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmpl(current_character(), Immediate('z'));
__ j(above, &done);
}
@@ -614,7 +614,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -1205,7 +1205,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1236,8 +1236,8 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between an Latin1 and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1413,7 +1413,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
} else if (characters == 2) {
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 2e2e45e35f..e3733775de 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -254,7 +254,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
ZoneList<int> code_relative_fixup_positions_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 25ecfcf137..6555ccdd83 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -45,7 +45,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return false; }
+bool CpuFeatures::SupportsCrankshaft() { return true; }
static const byte kCallOpcode = 0xE8;
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 0e3ff25fa4..110b813cae 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -38,6 +38,7 @@
#if V8_TARGET_ARCH_X87
+#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
@@ -266,7 +267,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(IsPowerOf2(m));
+ DCHECK(base::bits::IsPowerOfTwo32(m));
int mask = m - 1;
int addr = pc_offset();
Nop((m - (addr & mask)) & mask);
@@ -1518,6 +1519,20 @@ void Assembler::fst_s(const Operand& adr) {
}
+void Assembler::fldcw(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(ebp, adr);
+}
+
+
+void Assembler::fnstcw(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(edi, adr);
+}
+
+
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
@@ -1597,6 +1612,13 @@ void Assembler::fchs() {
}
+void Assembler::fsqrt() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFA);
+}
+
+
void Assembler::fcos() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
@@ -1658,6 +1680,13 @@ void Assembler::fadd_i(int i) {
}
+void Assembler::fadd_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDC);
+ emit_operand(eax, adr);
+}
+
+
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
@@ -1771,6 +1800,13 @@ void Assembler::ftst() {
}
+void Assembler::fxam() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE5);
+}
+
+
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xE8, i);
@@ -1832,6 +1868,20 @@ void Assembler::fnclex() {
}
+void Assembler::fnsave(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(esi, adr);
+}
+
+
+void Assembler::frstor(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(esp, adr);
+}
+
+
void Assembler::sahf() {
EnsureSpace ensure_space(this);
EMIT(0x9E);
@@ -1847,11 +1897,6 @@ void Assembler::setcc(Condition cc, Register reg) {
}
-void Assembler::Print() {
- Disassembler::Decode(isolate(), stdout, buffer_, pc_);
-}
-
-
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index a2bedcc3cc..a292388261 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -142,12 +142,19 @@ inline Register Register::FromAllocationIndex(int index) {
struct X87Register {
- static const int kMaxNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters = 6;
static const int kMaxNumRegisters = 8;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
+
+ // TODO(turbofan): Proper support for float32.
+ static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
+
static int ToAllocationIndex(X87Register reg) {
return reg.code_;
}
@@ -852,6 +859,7 @@ class Assembler : public AssemblerBase {
void fabs();
void fchs();
+ void fsqrt();
void fcos();
void fsin();
void fptan();
@@ -862,6 +870,7 @@ class Assembler : public AssemblerBase {
void fadd(int i);
void fadd_i(int i);
+ void fadd_d(const Operand& adr);
void fsub(int i);
void fsub_i(int i);
void fmul(int i);
@@ -884,14 +893,19 @@ class Assembler : public AssemblerBase {
void ffree(int i = 0);
void ftst();
+ void fxam();
void fucomp(int i);
void fucompp();
void fucomi(int i);
void fucomip();
void fcompp();
void fnstsw_ax();
+ void fldcw(const Operand& adr);
+ void fnstcw(const Operand& adr);
void fwait();
void fnclex();
+ void fnsave(const Operand& adr);
+ void frstor(const Operand& adr);
void frndint();
@@ -902,9 +916,6 @@ class Assembler : public AssemblerBase {
// TODO(lrn): Need SFENCE for movnt?
- // Debugging
- void Print();
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 59ecda3a95..d6311752c7 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -6,10 +6,10 @@
#if V8_TARGET_ARCH_X87
+#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -550,8 +550,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
@@ -660,7 +660,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -669,7 +670,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -680,13 +681,12 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm);
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- // SaveDoubles is meanless for X87, just used by deoptimizer.cc
- Generate_NotifyStubFailureHelper(masm);
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
@@ -995,8 +995,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Copy all arguments from the array to the stack.
Label entry, loop;
- Register receiver = LoadIC::ReceiverRegister();
- Register key = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
__ mov(key, Operand(ebp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
@@ -1004,9 +1004,10 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use inline caching to speed up access to arguments.
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(), Immediate(Smi::FromInt(0)));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Immediate(Smi::FromInt(0)));
}
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
__ call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 6191aaf4e6..4a6083c25f 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -6,129 +6,23 @@
#if V8_TARGET_ARCH_X87
+#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ebx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, edi };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = { esi, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax, ebx, ecx };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
-
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
- representations);
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax, ebx, ecx, edx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ebx, edx };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallFunctionStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = {esi, edi};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void CallConstructStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // eax : number of arguments
- // ebx : feedback vector
- // edx : (only if ebx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
- // edi : constructor function
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {esi, eax, edi, ebx};
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ecx, ebx, eax };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax, ebx };
- descriptor->Initialize(
- MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
-}
-
-
-const Register InterfaceDescriptor::ContextRegister() { return esi; }
-
-
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStub::Major major,
- CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
@@ -138,28 +32,17 @@ static void InitializeArrayConstructorDescriptor(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { esi, edi, ebx };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { esi, edi, ebx, eax };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
@@ -168,200 +51,70 @@ static void InitializeInternalArrayConstructorDescriptor(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- Register registers[] = { esi, edi };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
- deopt_handler, NULL, constant_stack_parameter_count,
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = { esi, edi, eax };
- Representation representations[] = {
- Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32() };
- descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
- deopt_handler, representations,
- constant_stack_parameter_count,
+ descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, edx, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, ecx, edx, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void StringAddStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- Register registers[] = { esi, edx, eax };
- descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
- Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- Register registers[] = { esi, // context
- edi, // JSFunction
- eax, // actual number of arguments
- ebx, // expected number of arguments
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- Register registers[] = { esi, // context
- ecx, // key
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- Register registers[] = { esi, // context
- ecx, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- Register registers[] = { esi, // context
- edx, // name
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- Register registers[] = { esi, // context
- eax, // callee
- ebx, // call_data
- ecx, // holder
- edx, // api_function_address
- };
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->GetEnvironmentParameterCount();
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- eax.is(descriptor->GetEnvironmentParameterRegister(
- param_count - 1)));
+ eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetEnvironmentParameterRegister(i));
}
- ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, param_count);
}
@@ -374,6 +127,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ pushad();
+ if (save_doubles()) {
+ // Save FPU stat in m108byte.
+ __ sub(esp, Immediate(108));
+ __ fnsave(Operand(esp, 0));
+ }
const int argument_count = 1;
AllowExternalCallThatCantCauseGC scope(masm);
@@ -383,6 +141,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
+ if (save_doubles()) {
+ // Restore FPU stat in m108byte.
+ __ frstor(Operand(esp, 0));
+ __ add(esp, Immediate(108));
+ }
__ popad();
__ ret(0);
}
@@ -568,7 +331,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver = LoadIC::ReceiverRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
ebx, &miss);
@@ -578,8 +341,40 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ Register scratch = eax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+ // Check that the key is an array index, that is Uint32.
+ __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ pop(scratch);
+ __ push(receiver); // receiver
+ __ push(key); // key
+ __ push(scratch); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
+ DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
// The displacement is used for skipping the frame pointer on the
// stack. It is the offset of the last parameter (if any) relative
@@ -1147,7 +942,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(ebx, &runtime);
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataOneByteCodeOffset));
__ Move(ecx, Immediate(1)); // Type is one byte.
// (E) Carry on. String handling is done.
@@ -1161,7 +956,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// eax: subject string
// ebx: previous index (smi)
// edx: code
- // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // ecx: encoding of subject string (1 if one_byte, 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -1206,7 +1001,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// esi: original subject string
// eax: underlying subject string
// ebx: previous index
- // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
+ // ecx: encoding of subject string (1 if one_byte 0 if two_byte);
// edx: code
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -1330,16 +1125,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ecx, eax);
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastSubjectOffset,
- eax,
- edi);
+ __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi,
+ kDontSaveFPRegs);
__ mov(eax, ecx);
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastInputOffset,
- eax,
- edi);
+ __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1441,14 +1232,12 @@ static int NegativeComparisonResult(Condition cc) {
}
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
+static void CheckInputType(MacroAssembler* masm, Register input,
+ CompareICState::State expected, Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->heap_number_map()));
@@ -1473,13 +1262,13 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
}
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects;
Condition cc = GetCondition();
Label miss;
- CheckInputType(masm, edx, left_, &miss);
- CheckInputType(masm, eax, right_, &miss);
+ CheckInputType(masm, edx, left(), &miss);
+ CheckInputType(masm, eax, right(), &miss);
// Compare two smis.
Label non_smi, smi_done;
@@ -1662,23 +1451,15 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
+ __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
- // Inline comparison of ASCII strings.
+ // Inline comparison of one-byte strings.
if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- edx,
- eax,
- ecx,
- ebx);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, edx, eax, ecx, ebx);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+ edi);
}
#ifdef DEBUG
__ Abort(kUnexpectedFallThroughFromStringComparison);
@@ -1767,7 +1548,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// function without changing the state.
__ cmp(ecx, edi);
__ j(equal, &done, Label::kFar);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &done, Label::kFar);
if (!FLAG_pretenuring_call_new) {
@@ -1790,14 +1571,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ mov(
+ FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
@@ -1843,7 +1624,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ push(edi);
__ push(ebx);
__ push(edx);
- __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ pop(edx);
__ pop(ebx);
__ pop(edi);
@@ -1964,7 +1746,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
@@ -2048,7 +1830,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
Label miss;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2072,7 +1854,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+ GenerateMiss(masm);
// The slow case, we need this no matter what to complete a call after a miss.
CallFunctionNoFeedback(masm,
@@ -2092,7 +1874,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
- int argc = state_.arg_count();
+ int argc = arg_count();
ParameterCount actual(argc);
EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2103,7 +1885,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &extra_checks_or_miss);
__ bind(&have_js_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
// Load the receiver from the stack.
@@ -2122,7 +1904,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
EmitSlowCase(isolate, masm, argc, &non_function);
- if (state_.CallAsMethod()) {
+ if (CallAsMethod()) {
__ bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
@@ -2132,9 +1914,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
__ j(equal, &miss);
if (!FLAG_trace_ic) {
@@ -2145,13 +1927,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ jmp(&slow_start);
}
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm, IC::kCallIC_Miss);
+ GenerateMiss(masm);
// the slow case
__ bind(&slow_start);
@@ -2169,9 +1951,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+ __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2183,6 +1965,9 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
__ push(edx);
// Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -2211,12 +1996,19 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- // Do nothing.
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ // Stubs might already be in the snapshot, detect that and don't regenerate,
+ // which would lead to code stub initialization state being messed up.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+ save_doubles_code = *(save_doubles.GetCode());
+ }
+ isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(isolate, 1);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
stub.GetCode();
}
@@ -2232,7 +2024,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame();
+ __ EnterExitFrame(save_doubles());
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2240,7 +2032,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// edi: number of arguments including receiver (C callee-saved)
// esi: pointer to the first argument (C callee-saved)
- // Result returned in eax, or eax+edx if result_size_ is 2.
+ // Result returned in eax, or eax+edx if result size is 2.
// Check stack alignment.
if (FLAG_debug_code) {
@@ -2288,7 +2080,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame();
+ __ LeaveExitFrame(save_doubles());
__ ret(0);
// Handling of exception.
@@ -2315,7 +2107,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
@@ -2326,7 +2118,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(ebp, esp);
// Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
__ push(Immediate(Smi::FromInt(marker))); // context slot
__ push(Immediate(Smi::FromInt(marker))); // function slot
// Save callee-saved registers (C calling conventions).
@@ -2377,7 +2169,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// pop the faked function when we return. Notice that we cannot store a
// reference to the trampoline code directly in this stub, because the
// builtin stubs may not have been generated yet.
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
__ mov(edx, Immediate(construct_entry));
@@ -2447,9 +2239,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int kDeltaToCmpImmediate = 2;
static const int kDeltaToMov = 8;
static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
- static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
- static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+ static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
+ static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
+ static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
DCHECK_EQ(object.code(), InstanceofStub::left().code());
DCHECK_EQ(function.code(), InstanceofStub::right().code());
@@ -2640,12 +2432,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -2745,7 +2531,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxOneByteCharCode) << kSmiTagSize)));
@@ -2756,7 +2542,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one byte char code.
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -2818,74 +2604,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- if (masm->serializer_enabled()) {
- __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
- __ SmiUntag(scratch);
- __ add(scratch, character);
- __ mov(hash, scratch);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- } else {
- int32_t seed = masm->isolate()->heap()->HashSeed();
- __ lea(scratch, Operand(character, seed));
- __ shl(scratch, 10);
- __ lea(hash, Operand(scratch, character, times_1, seed));
- }
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, character);
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, scratch);
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ shr(scratch, 11);
- __ xor_(hash, scratch);
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, scratch);
-
- __ and_(hash, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero, Label::kNear);
- __ mov(hash, Immediate(StringHasher::kZeroHash));
- __ bind(&hash_not_zero);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -2988,7 +2706,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+ __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
__ jmp(&set_slice_header, Label::kNear);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
@@ -3035,8 +2753,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ test_b(ebx, kStringEncodingMask);
__ j(zero, &two_byte_sequential);
- // Sequential ASCII string. Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+ // Sequential one byte string. Allocate the result.
+ __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -3107,11 +2825,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
Register length = scratch1;
// Compare lengths.
@@ -3134,8 +2852,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
// Characters are equal.
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3143,12 +2861,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->string_compare_native(), 1);
@@ -3174,8 +2889,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare characters.
Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
@@ -3209,13 +2924,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch, Label* chars_not_equal,
Label::Distance chars_not_equal_near) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
@@ -3261,15 +2972,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
- // Compare flat ASCII strings.
+ // Compare flat one-byte strings.
// Drop arguments from the stack.
__ pop(ecx);
__ add(esp, Immediate(2 * kPointerSize));
__ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+ edi);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -3301,13 +3013,13 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
@@ -3332,17 +3044,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(edx, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(eax, &miss);
}
@@ -3361,12 +3073,12 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
__ JumpIfSmi(edx, &unordered);
@@ -3376,7 +3088,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
__ j(equal, &unordered);
}
@@ -3386,8 +3098,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
DCHECK(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -3431,8 +3143,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::UNIQUE_NAME);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
DCHECK(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -3455,8 +3167,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
- __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
@@ -3476,11 +3188,11 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = edx;
@@ -3536,17 +3248,17 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&do_compare);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+ __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+ tmp2);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3);
}
// Handle more complex cases in runtime.
@@ -3566,8 +3278,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -3587,7 +3299,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -3608,7 +3320,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
@@ -3618,7 +3330,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(eax);
__ push(edx); // And also use them as the arguments.
__ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(op())));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -3681,8 +3393,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Check if the entry name is not a unique name.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- miss);
+ __ JumpIfNotUniqueNameInstanceType(
+ FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
__ bind(&good);
}
@@ -3774,9 +3486,9 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- Register scratch = result_;
+ Register scratch = result();
- __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
__ dec(scratch);
__ SmiUntag(scratch);
__ push(scratch);
@@ -3796,13 +3508,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
DCHECK(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ lea(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
DCHECK_EQ(kSmiTagSize, 1);
- __ mov(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
+ __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(scratch, isolate()->factory()->undefined_value());
__ j(equal, &not_in_dictionary);
@@ -3811,15 +3521,16 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ cmp(scratch, Operand(esp, 3 * kPointerSize));
__ j(equal, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// If we hit a key that is not a unique name during negative
// lookup we have to bailout as this key might be equal to the
// key we are looking for.
// Check if the entry name is not a unique name.
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
- &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(
+ FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
}
}
@@ -3827,19 +3538,19 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ mov(result_, Immediate(0));
+ if (mode() == POSITIVE_LOOKUP) {
+ __ mov(result(), Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
__ bind(&in_dictionary);
- __ mov(result_, Immediate(1));
+ __ mov(result(), Immediate(1));
__ Drop(1);
__ ret(2 * kPointerSize);
__ bind(&not_in_dictionary);
- __ mov(result_, Immediate(0));
+ __ mov(result(), Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
@@ -3847,8 +3558,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub(isolate);
+ StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
stub.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -3867,10 +3580,8 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ jmp(&skip_to_incremental_noncompacting, Label::kNear);
__ jmp(&skip_to_incremental_compacting, Label::kFar);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -3892,7 +3603,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ mov(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -3914,9 +3625,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -3933,7 +3642,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -3946,7 +3655,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
@@ -3977,9 +3686,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4024,9 +3731,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4096,8 +3801,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
FixedArrayBase::kHeaderSize));
__ mov(Operand(ecx, 0), eax);
// Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax,
- EMIT_REMEMBERED_SET,
+ __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ ret(0);
@@ -4126,21 +3830,34 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ mov(ebx, MemOperand(ebp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ pop(ecx);
- int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
- ? kPointerSize
- : 0;
+ int additional_offset =
+ function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
__ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4335,7 +4052,7 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ test(eax, eax);
__ j(not_zero, &not_zero_case);
@@ -4348,11 +4065,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
@@ -4362,7 +4079,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : argc (only if argument_count_ == ANY)
+ // -- eax : argc (only if argument_count() == ANY)
// -- ebx : AllocationSite or undefined
// -- edi : constructor
// -- esp[0] : return address
@@ -4510,9 +4227,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register return_address = edi;
Register context = esi;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
@@ -4616,6 +4333,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// -- ...
// -- edx : api_function_address
// -----------------------------------
+ DCHECK(edx.is(ApiGetterDescriptor::function_address()));
// array for v8::Arguments::values_, handler for name and pointer
// to the values (it considered as smi in GC).
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
index e32902f27c..03ff477f6a 100644
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ b/deps/v8/src/x87/code-stubs-x87.h
@@ -5,9 +5,6 @@
#ifndef V8_X87_CODE_STUBS_X87_H_
#define V8_X87_CODE_STUBS_X87_H_
-#include "src/ic-inl.h"
-#include "src/macro-assembler.h"
-
namespace v8 {
namespace internal {
@@ -17,22 +14,6 @@ void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code);
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(Isolate* isolate)
- : PlatformCodeStub(isolate) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- Major MajorKey() const { return StoreBufferOverflow; }
- int MinorKey() const { return 0; }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using the rep movs instruction.
@@ -45,69 +26,26 @@ class StringHelper : public AllStatic {
Register scratch,
String::Encoding encoding);
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- Major MajorKey() const { return SubString; }
- int MinorKey() const { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+ // Compares two flat one byte strings and returns result in eax.
+ static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
- // Compares two flat ASCII strings and returns result in eax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
+ // Compares two flat one byte strings for equality and returns result in eax.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat ASCII strings for equality and returns result
- // in eax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
+ Register scratch2);
private:
- virtual Major MajorKey() const { return StringCompare; }
- virtual int MinorKey() const { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
+ static void GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch, Label* chars_not_equal,
Label::Distance chars_not_equal_near = Label::kFar);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -115,15 +53,13 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Isolate* isolate,
- Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : PlatformCodeStub(isolate),
- dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
+ NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
+ Register result, Register index, LookupMode mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = DictionaryBits::encode(dictionary.code()) |
+ ResultBits::encode(result.code()) |
+ IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
+ }
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
@@ -154,44 +90,49 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() const { return NameDictionaryLookup; }
+ Register dictionary() const {
+ return Register::from_code(DictionaryBits::decode(minor_key_));
+ }
+
+ Register result() const {
+ return Register::from_code(ResultBits::decode(minor_key_));
+ }
- int MinorKey() const {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
+ Register index() const {
+ return Register::from_code(IndexBits::decode(minor_key_));
}
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
class DictionaryBits: public BitField<int, 0, 3> {};
class ResultBits: public BitField<int, 3, 3> {};
class IndexBits: public BitField<int, 6, 3> {};
class LookupModeBits: public BitField<LookupMode, 9, 1> {};
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Isolate* isolate,
- Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action)
+ RecordWriteStub(Isolate* isolate, Register object, Register value,
+ Register address, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
- object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
@@ -247,6 +188,8 @@ class RecordWriteStub: public PlatformCodeStub {
CpuFeatures::FlushICache(stub->instruction_start(), 7);
}
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always ecx (needed for shift operations). The input is two registers
@@ -327,12 +270,23 @@ class RecordWriteStub: public PlatformCodeStub {
// saved registers that were not already preserved. The caller saved
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm) {
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+ if (mode == kSaveFPRegs) {
+ // Save FPU state in m108byte.
+ masm->sub(esp, Immediate(108));
+ masm->fnsave(Operand(esp, 0));
+ }
}
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm) {
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ // Restore FPU state in m108byte.
+ masm->frstor(Operand(esp, 0));
+ masm->add(esp, Immediate(108));
+ }
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
}
@@ -372,9 +326,11 @@ class RecordWriteStub: public PlatformCodeStub {
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- }
-;
- void Generate(MacroAssembler* masm);
+ };
+
+ virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+ virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -382,29 +338,39 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() const { return RecordWrite; }
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
- int MinorKey() const {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_);
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
}
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 3> {};
class ValueBits: public BitField<int, 3, 3> {};
class AddressBits: public BitField<int, 6, 3> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+ class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 10, 1> {};
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index f6b8fc4f2a..e33959e65b 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -217,12 +217,8 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// Set transitioned map.
__ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@@ -275,12 +271,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Replace receiver's backing store with newly created FixedDoubleArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
__ mov(ebx, eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- ebx,
- edi,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
@@ -339,12 +331,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// ebx: target map
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@@ -399,12 +387,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Set transitioned map.
__ bind(&only_change_map);
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ jmp(&success);
// Call into runtime if GC is required.
@@ -433,10 +417,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
__ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
__ mov(esi, ebx);
- __ RecordWriteArray(eax,
- edx,
- esi,
- EMIT_REMEMBERED_SET,
+ __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ jmp(&entry, Label::kNear);
@@ -455,20 +436,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// edx: receiver
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- eax,
- edi,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Restore registers.
__ pop(eax);
@@ -531,7 +504,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ j(zero, &seq_string, Label::kNear);
// Handle external strings.
- Label ascii_external, done;
+ Label one_byte_external, done;
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
@@ -546,22 +519,22 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
STATIC_ASSERT(kTwoByteStringTag == 0);
__ test_b(result, kStringEncodingMask);
__ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
+ __ j(not_equal, &one_byte_external, Label::kNear);
// Two-byte string.
__ movzx_w(result, Operand(result, index, times_2, 0));
__ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // Ascii string.
+ __ bind(&one_byte_external);
+ // One-byte string.
__ movzx_b(result, Operand(result, index, times_1, 0));
__ jmp(&done, Label::kNear);
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
+ // Dispatch on the encoding: one-byte or two-byte.
+ Label one_byte;
__ bind(&seq_string);
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
+ __ j(not_zero, &one_byte, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
@@ -571,9 +544,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
SeqTwoByteString::kHeaderSize));
__ jmp(&done, Label::kNear);
- // Ascii string.
+ // One-byte string.
// Load the byte into the result register.
- __ bind(&ascii);
+ __ bind(&one_byte);
__ movzx_b(result, FieldOperand(string,
index,
times_1,
diff --git a/deps/v8/src/x87/codegen-x87.h b/deps/v8/src/x87/codegen-x87.h
index 15b2702407..c23e8668da 100644
--- a/deps/v8/src/x87/codegen-x87.h
+++ b/deps/v8/src/x87/codegen-x87.h
@@ -6,7 +6,7 @@
#define V8_X87_CODEGEN_X87_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x87/debug-x87.cc b/deps/v8/src/x87/debug-x87.cc
index 3f94edd217..92c23abea2 100644
--- a/deps/v8/src/x87/debug-x87.cc
+++ b/deps/v8/src/x87/debug-x87.cc
@@ -180,17 +180,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x87.cc).
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-x87.cc).
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0, false);
}
@@ -204,9 +204,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC store call (from ic-x87.cc).
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0, false);
}
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 96698a1325..a76c7a709d 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -194,7 +194,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
int params = descriptor->GetHandlerParameterCount();
@@ -204,8 +204,10 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- // Do nothing for X87.
- return;
+ for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
}
@@ -230,9 +232,42 @@ void Deoptimizer::EntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
+
+ const int kDoubleRegsSize =
+ kDoubleSize * X87Register::kMaxNumAllocatableRegisters;
+
+ // Reserve space for x87 fp registers.
+ __ sub(esp, Immediate(kDoubleRegsSize));
+
__ pushad();
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize;
+ // GP registers are safe to use now.
+ // Save used x87 fp registers in correct position of previous reserve space.
+ Label loop, done;
+ // Get the layout of x87 stack.
+ __ sub(esp, Immediate(kPointerSize));
+ __ fistp_s(MemOperand(esp, 0));
+ __ pop(eax);
+ // Preserve stack layout in edi
+ __ mov(edi, eax);
+ // Get the x87 stack depth, the first 3 bits.
+ __ mov(ecx, eax);
+ __ and_(ecx, 0x7);
+ __ j(zero, &done, Label::kNear);
+
+ __ bind(&loop);
+ __ shr(eax, 0x3);
+ __ mov(ebx, eax);
+ __ and_(ebx, 0x7); // Extract the st_x index into ebx.
+ // Pop TOS to the correct position. The disp(0x20) is due to pushad.
+ // The st_i should be saved to (esp + ebx * kDoubleSize + 0x20).
+ __ fstp_d(Operand(esp, ebx, times_8, 0x20));
+ __ dec(ecx); // Decrease stack depth.
+ __ j(not_zero, &loop, Label::kNear);
+ __ bind(&done);
+
+ const int kSavedRegistersAreaSize =
+ kNumberOfRegisters * kPointerSize + kDoubleRegsSize;
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
@@ -245,6 +280,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ sub(edx, ebp);
__ neg(edx);
+ __ push(edi);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, eax);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -260,6 +296,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
+ __ pop(edi);
+
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
__ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
@@ -270,13 +308,22 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(Operand(ebx, offset));
}
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Fill in the double input registers.
+ for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize;
+ __ fld_d(Operand(esp, src_offset));
+ __ fstp_d(Operand(ebx, dst_offset));
+ }
+
// Clear FPU all exceptions.
// TODO(ulan): Find out why the TOP register is not zero here in some cases,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Remove the bailout id, return address and the double registers.
- __ add(esp, Immediate(2 * kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
@@ -298,6 +345,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
+ __ push(edi);
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
@@ -307,6 +355,7 @@ void Deoptimizer::EntryGenerator::Generate() {
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(eax);
+ __ pop(edi);
// If frame was dynamically aligned, pop padding.
Label no_padding;
@@ -345,6 +394,25 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
+
+ // In case of a failed STUB, we have to restore the x87 stack.
+ // x87 stack layout is in edi.
+ Label loop2, done2;
+ // Get the x87 stack depth, the first 3 bits.
+ __ mov(ecx, edi);
+ __ and_(ecx, 0x7);
+ __ j(zero, &done2, Label::kNear);
+
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ bind(&loop2);
+ __ mov(eax, edi);
+ __ shr_cl(eax);
+ __ and_(eax, 0x7);
+ __ fld_d(Operand(ebx, eax, times_8, double_regs_offset));
+ __ sub(ecx, Immediate(0x3));
+ __ j(not_zero, &loop2, Label::kNear);
+ __ bind(&done2);
+
// Push state, pc, and continuation from the last output frame.
__ push(Operand(ebx, FrameDescription::state_offset()));
__ push(Operand(ebx, FrameDescription::pc_offset()));
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index 53a8c29067..908e8b0439 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -702,7 +702,12 @@ int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
case 0: mnem = "fld_s"; break;
case 2: mnem = "fst_s"; break;
case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
+ case 5:
+ mnem = "fldcw";
+ break;
+ case 7:
+ mnem = "fnstcw";
+ break;
default: UnimplementedInstruction();
}
break;
@@ -716,11 +721,27 @@ int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
}
break;
+ case 0xDC:
+ switch (regop) {
+ case 0:
+ mnem = "fadd_d";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ break;
+
case 0xDD: switch (regop) {
case 0: mnem = "fld_d"; break;
case 1: mnem = "fisttp_d"; break;
case 2: mnem = "fst_d"; break;
case 3: mnem = "fstp_d"; break;
+ case 4:
+ mnem = "frstor";
+ break;
+ case 6:
+ mnem = "fnsave";
+ break;
default: UnimplementedInstruction();
}
break;
diff --git a/deps/v8/src/x87/full-codegen-x87.cc b/deps/v8/src/x87/full-codegen-x87.cc
index 2e8b8651ff..05076047ef 100644
--- a/deps/v8/src/x87/full-codegen-x87.cc
+++ b/deps/v8/src/x87/full-codegen-x87.cc
@@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_X87
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -220,10 +221,8 @@ void FullCodeGenerator::Generate() {
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
if (need_write_barrier) {
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx);
+ __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+ kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
@@ -707,7 +706,7 @@ void FullCodeGenerator::SetVar(Variable* var,
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1);
+ __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
}
}
@@ -837,12 +836,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VisitForAccumulatorValue(declaration->fun());
__ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- result_register(),
- ecx,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()),
+ result_register(), ecx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}
@@ -876,11 +872,8 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
// Assign it.
__ mov(ContextOperand(esi, variable->index()), eax);
// We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- eax,
- ecx,
- EMIT_REMEMBERED_SET,
+ __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()), eax,
+ ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
@@ -985,7 +978,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1113,7 +1107,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// No need for a write barrier, we are storing a Smi in the feedback vector.
__ LoadHeapObject(ebx, FeedbackVector());
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
@@ -1252,9 +1246,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(),
- info->strict_mode(),
- info->is_generator());
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1275,6 +1267,25 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ Label done;
+ __ j(not_equal, &done);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1325,10 +1336,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), proxy->var()->name());
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
@@ -1412,10 +1423,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), var->name());
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
@@ -1631,9 +1642,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreIC::ValueRegister().is(eax));
- __ mov(StoreIC::NameRegister(), Immediate(key->value()));
- __ mov(StoreIC::ReceiverRegister(), Operand(esp, 0));
+ DCHECK(StoreDescriptor::ValueRegister().is(eax));
+ __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1764,9 +1775,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Store the subexpression value in the array's elements.
__ mov(FieldOperand(ebx, offset), result_register());
// Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
+ __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
__ mov(ecx, Immediate(Smi::FromInt(i)));
@@ -1793,13 +1803,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY
+ };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
+ : NAMED_PROPERTY)
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1807,11 +1823,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
// Nothing to do here.
break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ push(result_register());
+ if (expr->is_compound()) {
+ __ push(MemOperand(esp, kPointerSize));
+ __ push(result_register());
+ }
+ break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
} else {
VisitForStackValue(property->obj());
}
@@ -1820,8 +1845,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, kPointerSize));
- __ mov(LoadIC::NameRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
+ __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1840,6 +1865,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1889,6 +1918,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyAssignment(expr);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
@@ -1903,12 +1935,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
- case Yield::SUSPEND:
+ case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
// Fall through.
- case Yield::INITIAL: {
+ case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
@@ -1923,7 +1955,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Immediate(Smi::FromInt(continuation.pos())));
__ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
__ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
__ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
__ cmp(esp, ebx);
__ j(equal, &post_runtime);
@@ -1940,7 +1973,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::FINAL: {
+ case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ mov(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
@@ -1952,7 +1985,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING: {
+ case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
@@ -1961,8 +1994,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
- Register load_receiver = LoadIC::ReceiverRegister();
- Register load_name = LoadIC::NameRegister();
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -1997,7 +2030,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Immediate(Smi::FromInt(l_continuation.pos())));
__ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
__ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2018,10 +2052,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ mov(load_receiver, Operand(esp, kPointerSize));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
@@ -2038,7 +2072,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(load_name,
isolate()->factory()->done_string()); // "done"
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->DoneFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
@@ -2052,7 +2086,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(load_name,
isolate()->factory()->value_string()); // "value"
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->ValueFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
@@ -2205,8 +2239,8 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
// Only the value field needs a write barrier, as the other values are in the
// root set.
- __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
- ecx, edx);
+ __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, ecx,
+ edx, kDontSaveFPRegs);
}
@@ -2214,9 +2248,11 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
- __ mov(LoadIC::NameRegister(), Immediate(key->value()));
+ DCHECK(!prop->IsSuperAccess());
+
+ __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -2225,11 +2261,23 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ push(Immediate(key->value()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallIC(ic);
} else {
@@ -2254,8 +2302,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- BinaryOpICStub stub(isolate(), op, mode);
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2337,9 +2385,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- BinaryOpICStub stub(isolate(), op, mode);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -2369,9 +2417,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ push(eax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ Move(StoreIC::ReceiverRegister(), eax);
- __ pop(StoreIC::ValueRegister()); // Restore value.
- __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
+ __ Move(StoreDescriptor::ReceiverRegister(), eax);
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
+ prop->key()->AsLiteral()->value());
CallStoreIC();
break;
}
@@ -2379,12 +2428,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ push(eax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(KeyedStoreIC::NameRegister(), eax);
- __ pop(KeyedStoreIC::ReceiverRegister()); // Receiver.
- __ pop(KeyedStoreIC::ValueRegister()); // Restore value.
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ Move(StoreDescriptor::NameRegister(), eax);
+ __ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
@@ -2399,7 +2447,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
if (var->IsContextSlot()) {
__ mov(edx, eax);
int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx);
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
}
}
@@ -2408,8 +2456,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreIC::NameRegister(), var->name());
- __ mov(StoreIC::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(StoreDescriptor::NameRegister(), var->name());
+ __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
@@ -2482,28 +2530,44 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
+void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+ // Assignment to named property of super.
+ // eax : value
+ // stack : receiver ('this'), home_object
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ push(eax);
+ __ push(Immediate(key->value()));
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// eax : value
// esp[0] : key
// esp[kPointerSize] : receiver
- __ pop(KeyedStoreIC::NameRegister()); // Key.
- __ pop(KeyedStoreIC::ReceiverRegister());
- DCHECK(KeyedStoreIC::ValueRegister().is(eax));
+ __ pop(StoreDescriptor::NameRegister()); // Key.
+ __ pop(StoreDescriptor::ReceiverRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(eax));
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2516,16 +2580,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadIC::ReceiverRegister(), result_register());
- EmitNamedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(LoadIC::ReceiverRegister()); // Object.
- __ Move(LoadIC::NameRegister(), result_register()); // Key.
+ __ pop(LoadDescriptor::ReceiverRegister()); // Object.
+ __ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -2543,11 +2614,10 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallIC::CallType call_type = callee->IsVariableProxy()
- ? CallIC::FUNCTION
- : CallIC::METHOD;
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallIC::FUNCTION) {
+ if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2558,7 +2628,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2570,6 +2641,42 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ push(eax);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ push(eax);
+ __ push(eax);
+ __ push(Operand(esp, kPointerSize * 2));
+ __ push(Immediate(key->value()));
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2580,8 +2687,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadIC::NameRegister(), eax);
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::NameRegister(), eax);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2589,11 +2696,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- EmitCall(expr, CallIC::METHOD);
+ EmitCall(expr, CallICState::METHOD);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2730,15 +2837,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithLoadIC(expr);
+ bool is_named_call = property->key()->IsPropertyName();
+ // super.x() is handled in EmitCallWithLoadIC.
+ if (property->IsSuperAccess() && is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithLoadIC(expr, property->key());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
}
-
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3241,7 +3354,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_string());
+ __ mov(eax, isolate()->factory()->Function_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
@@ -3359,9 +3472,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = ebx;
Register value = ecx;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ pop(value);
__ pop(index);
@@ -3395,9 +3508,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = ebx;
Register value = ecx;
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
__ pop(value);
__ pop(index);
@@ -3454,7 +3567,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ mov(edx, eax);
- __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx);
+ __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(eax);
@@ -3747,7 +3860,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
@@ -3805,7 +3918,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array = no_reg;
- // Check that all array elements are sequential ASCII strings, and
+ // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ Move(index, Immediate(0));
__ Move(string_length, Immediate(0));
@@ -3814,7 +3927,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length, elements.
if (generate_debug_code_) {
__ cmp(index, array_length);
- __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ bind(&loop);
__ mov(string, FieldOperand(elements,
@@ -3852,7 +3965,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths, as a smi.
// elements: FixedArray of strings.
- // Check that the separator is a flat ASCII string.
+ // Check that the separator is a flat one-byte string.
__ mov(string, separator_operand);
__ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
@@ -3876,8 +3989,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live registers and stack values:
// string_length
// elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
+ __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
+ &bailout);
__ mov(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
@@ -3920,7 +4033,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
+ // Replace separator with its one-byte character value.
__ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
@@ -4038,10 +4151,10 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
// Load the function from the receiver.
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadIC::NameRegister(), Immediate(expr->name()));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
@@ -4213,6 +4326,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ if (prop->IsSuperAccess()) {
+ // throw exception.
+ VisitSuperReference(prop->obj()->AsSuperReference());
+ return;
+ }
}
// Evaluate expression and get value.
@@ -4228,14 +4346,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0));
+ __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ mov(LoadIC::ReceiverRegister(),
- Operand(esp, kPointerSize)); // Object.
- __ mov(LoadIC::NameRegister(), Operand(esp, 0)); // Key.
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ Operand(esp, kPointerSize)); // Object.
+ __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0)); // Key.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4320,8 +4438,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+ NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4350,8 +4469,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value());
- __ pop(StoreIC::ReceiverRegister());
+ __ mov(StoreDescriptor::NameRegister(),
+ prop->key()->AsLiteral()->value());
+ __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4364,11 +4484,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(KeyedStoreIC::NameRegister());
- __ pop(KeyedStoreIC::ReceiverRegister());
- Handle<Code> ic = strict_mode() == SLOPPY
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ __ pop(StoreDescriptor::NameRegister());
+ __ pop(StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4392,10 +4511,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadIC::NameRegister(), Immediate(proxy->name()));
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
if (FLAG_vector_ics) {
- __ mov(LoadIC::SlotRegister(),
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
}
// Use a regular load, not a contextual load, to avoid a reference
@@ -4557,7 +4676,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
new file mode 100644
index 0000000000..8dfad3633c
--- /dev/null
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -0,0 +1,304 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return edx; }
+const Register LoadDescriptor::NameRegister() { return ecx; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return edx; }
+const Register StoreDescriptor::NameRegister() { return ecx; }
+const Register StoreDescriptor::ValueRegister() { return eax; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() {
+ return ebx;
+}
+
+
+const Register InstanceofDescriptor::left() { return eax; }
+const Register InstanceofDescriptor::right() { return edx; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return edx; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
+
+
+const Register ApiGetterDescriptor::function_address() { return edx; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return eax; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // ToNumberStub invokes a function, and therefore needs a context.
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax, ebx, ecx};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax, ebx, ecx, edx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx, edx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ecx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi, edx};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // edi : constructor function
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {esi, eax, edi, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ecx, ebx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ Register registers[] = {esi, edi, ebx};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {esi, edi, ebx, eax};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ Register registers[] = {esi, edi};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {esi, edi, eax};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ecx, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ ecx, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ ecx, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ edx, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ edi, // JSFunction
+ eax, // actual number of arguments
+ ebx, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ eax, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/x87/lithium-codegen-x87.cc
index 8ba73c6236..00bbe5e72b 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/x87/lithium-codegen-x87.cc
@@ -6,12 +6,14 @@
#if V8_TARGET_ARCH_X87
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
-#include "src/ic.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/x87/lithium-codegen-x87.h"
namespace v8 {
@@ -20,7 +22,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -30,9 +32,9 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) {}
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -252,10 +254,8 @@ bool LCodeGen::GeneratePrologue() {
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
if (need_write_barrier) {
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx);
+ __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+ kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
@@ -267,6 +267,8 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
+ // Initailize FPU state.
+ __ fninit();
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
// We have not executed any compiled code yet, so esi still holds the
@@ -325,6 +327,9 @@ void LCodeGen::GenerateOsrPrologue() {
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
DCHECK(slots >= 1);
__ sub(esp, Immediate((slots - 1) * kPointerSize));
+
+ // Initailize FPU state.
+ __ fninit();
}
@@ -340,8 +345,21 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ // When return from function call, FPU should be initialized again.
+ if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
+ bool double_result = instr->HasDoubleRegisterResult();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ }
+ }
if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
} else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
!instr->IsGap() && !instr->IsReturn()) {
if (instr->ClobbersDoubleRegisters(isolate())) {
@@ -362,16 +380,11 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; -------------------- Jump table --------------------");
}
for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
@@ -493,10 +506,27 @@ void LCodeGen::X87LoadForUsage(X87Register reg) {
void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
DCHECK(x87_stack_.Contains(reg1));
DCHECK(x87_stack_.Contains(reg2));
- x87_stack_.Fxch(reg1, 1);
- x87_stack_.Fxch(reg2);
- x87_stack_.pop();
- x87_stack_.pop();
+ if (reg1.is(reg2) && x87_stack_.depth() == 1) {
+ __ fld(x87_stack_.st(reg1));
+ x87_stack_.push(reg1);
+ x87_stack_.pop();
+ x87_stack_.pop();
+ } else {
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+ }
+}
+
+
+int LCodeGen::X87Stack::GetLayout() {
+ int layout = stack_depth_;
+ for (int i = 0; i < stack_depth_; i++) {
+ layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
+ }
+
+ return layout;
}
@@ -571,6 +601,22 @@ void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
}
+void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
+ if (x87_stack_.Contains(dst)) {
+ x87_stack_.Fxch(dst);
+ __ fstp(0);
+ x87_stack_.pop();
+ // Push ST(i) onto the FPU register stack
+ __ fld(x87_stack_.st(src));
+ x87_stack_.push(dst);
+ } else {
+ // Push ST(i) onto the FPU register stack
+ __ fld(x87_stack_.st(src));
+ x87_stack_.push(dst);
+ }
+}
+
+
void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
DCHECK(!src.is_reg_only());
switch (opts) {
@@ -596,6 +642,9 @@ void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
case kX87DoubleOperand:
__ fst_d(dst);
break;
+ case kX87FloatOperand:
+ __ fst_s(dst);
+ break;
case kX87IntOperand:
__ fist_s(dst);
break;
@@ -659,15 +708,39 @@ void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
}
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
- DCHECK(stack_depth_ <= 1);
- // If ever used for new stubs producing two pairs of doubles joined into two
- // phis this assert hits. That situation is not handled, since the two stacks
- // might have st0 and st1 swapped.
- if (current_block_id + 1 != goto_instr->block_id()) {
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
+ LCodeGen* cgen) {
+ // For going to a joined block, an explicit LClobberDoubles is inserted before
+ // LGoto. Because all used x87 registers are spilled to stack slots. The
+ // ResolvePhis phase of register allocator could guarantee the two input's x87
+ // stacks have the same layout. So don't check stack_depth_ <= 1 here.
+ int goto_block_id = goto_instr->block_id();
+ if (current_block_id + 1 != goto_block_id) {
// If we have a value on the x87 stack on leaving a block, it must be a
// phi input. If the next block we compile is not the join block, we have
// to discard the stack state.
+ // Before discarding the stack state, we need to save it if the "goto block"
+ // has unreachable last predecessor when FLAG_unreachable_code_elimination.
+ if (FLAG_unreachable_code_elimination) {
+ int length = goto_instr->block()->predecessors()->length();
+ bool has_unreachable_last_predecessor = false;
+ for (int i = 0; i < length; i++) {
+ HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
+ if (block->IsUnreachable() &&
+ (block->block_id() + 1) == goto_block_id) {
+ has_unreachable_last_predecessor = true;
+ }
+ }
+ if (has_unreachable_last_predecessor) {
+ if (cgen->x87_stack_map_.find(goto_block_id) ==
+ cgen->x87_stack_map_.end()) {
+ X87Stack* stack = new (cgen->zone()) X87Stack(*this);
+ cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
+ }
+ }
+ }
+
+ // Discard the stack state.
stack_depth_ = 0;
}
}
@@ -677,13 +750,14 @@ void LCodeGen::EmitFlushX87ForDeopt() {
// The deoptimizer does not support X87 Registers. But as long as we
// deopt from a stub its not a problem, since we will re-materialize the
// original stub inputs, which can't be double registers.
- DCHECK(info()->IsStub());
+ // DCHECK(info()->IsStub());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ pushfd();
__ VerifyX87StackDepth(x87_stack_.depth());
__ popfd();
}
- for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
+
+ // Flush X87 stack in the deoptimizer entry.
}
@@ -890,6 +964,9 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
} else {
translation->StoreInt32Register(reg);
}
+ } else if (op->IsDoubleRegister()) {
+ X87Register reg = ToX87Register(op);
+ translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
@@ -924,13 +1001,12 @@ void LCodeGen::CallCode(Handle<Code> code,
}
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr) {
+void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
+ LInstruction* instr, SaveFPRegsMode save_doubles) {
DCHECK(instr != NULL);
DCHECK(instr->HasPointerMap());
- __ CallRuntime(fun, argc);
+ __ CallRuntime(fun, argc, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -960,7 +1036,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
LOperand* context) {
LoadContextFromDeferred(context);
- __ CallRuntime(id);
+ __ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -1005,9 +1081,10 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
@@ -1033,6 +1110,12 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ pop(eax);
__ popfd();
DCHECK(frame_is_built_);
+ // Put the x87 stack layout in TOS.
+ if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+ __ push(Immediate(x87_stack_.GetLayout()));
+ __ fild_s(MemOperand(esp, 0));
+ // Don't touch eflags.
+ __ lea(esp, Operand(esp, kPointerSize));
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ mov(Operand::StaticVariable(count), eax);
@@ -1040,14 +1123,18 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ popfd();
}
- // Before Instructions which can deopt, we normally flush the x87 stack. But
- // we can have inputs or outputs of the current instruction on the stack,
- // thus we need to flush them here from the physical stack to leave it in a
- // consistent state.
- if (x87_stack_.depth() > 0) {
+ // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
+ // the correct location.
+ {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
- EmitFlushX87ForDeopt();
+ if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+
+ int x87_stack_layout = x87_stack_.GetLayout();
+ __ push(Immediate(x87_stack_layout));
+ __ fild_s(MemOperand(esp, 0));
+ // Don't touch eflags.
+ __ lea(esp, Operand(esp, kPointerSize));
__ bind(&done);
}
@@ -1058,19 +1145,19 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&done);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
+ DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().bailout_type != bailout_type) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
@@ -1082,12 +1169,12 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, detail, bailout_type);
}
@@ -1095,7 +1182,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -1234,6 +1321,16 @@ void LCodeGen::DoLabel(LLabel* label) {
LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
+ if (label->block()->predecessors()->length() > 1) {
+ // A join block's x87 stack is that of its last visited predecessor.
+ // If the last visited predecessor block is unreachable, the stack state
+ // will be wrong. In such case, use the x87 stack of reachable predecessor.
+ X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
+ // Restore x87 stack.
+ if (it != x87_stack_map_.end()) {
+ x87_stack_ = *(it->second);
+ }
+ }
DoGap(label);
}
@@ -1316,7 +1413,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
@@ -1333,7 +1430,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1348,7 +1445,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -1370,7 +1467,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1381,7 +1478,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@@ -1400,7 +1497,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1413,26 +1510,26 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1453,7 +1550,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1461,7 +1558,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1471,7 +1568,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
@@ -1491,7 +1588,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1500,7 +1597,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1510,7 +1607,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@@ -1521,7 +1618,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
@@ -1543,13 +1640,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
@@ -1576,7 +1673,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@@ -1584,7 +1681,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1631,7 +1728,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1640,7 +1737,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1650,7 +1747,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@@ -1728,25 +1825,25 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
__ test(left, Operand(left));
- __ j(not_zero, &done, Label::kNear);
+ __ j(not_zero, &done);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
@@ -1811,10 +1908,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
switch (instr->op()) {
case Token::ROR:
__ ror_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
- }
break;
case Token::SAR:
__ sar_cl(ToRegister(left));
@@ -1823,7 +1916,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
@@ -1840,7 +1933,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "negative value");
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1855,7 +1948,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
@@ -1866,7 +1959,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shl(ToRegister(left), shift_count);
}
@@ -1892,7 +1985,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
@@ -1909,7 +2002,7 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
DCHECK(instr->result()->IsDoubleRegister());
@@ -1952,9 +2045,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -2084,7 +2177,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
@@ -2116,8 +2209,58 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
}
__ bind(&return_left);
} else {
- // TODO(weiliang) use X87 for double representation.
- UNIMPLEMENTED();
+ DCHECK(instr->hydrogen()->representation().IsDouble());
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+ X87Register left_reg = ToX87Register(left);
+ X87Register right_reg = ToX87Register(right);
+
+ X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ // Push st0 and st1 to stack, then pop them to temp registers and OR them,
+ // load it to left.
+ Register scratch_reg = ToRegister(instr->temp());
+ __ fld(1);
+ __ fld(1);
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, kPointerSize));
+ __ pop(scratch_reg);
+ __ xor_(MemOperand(esp, 0), scratch_reg);
+ X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
+ __ pop(scratch_reg); // restore esp
+ } else {
+ // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
+ X87Fxch(left_reg);
+ __ fadd(1);
+ }
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ X87Fxch(left_reg);
+ X87Mov(left_reg, right_reg);
+
+ __ bind(&return_left);
}
}
@@ -2162,6 +2305,13 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
UNREACHABLE();
break;
}
+
+ // Only always explicitly storing to memory to force the round-down for double
+ // arithmetic.
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
}
@@ -2171,8 +2321,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2214,7 +2365,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ test(reg, Operand(reg));
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
- UNREACHABLE();
+ X87Register reg = ToX87Register(instr->value());
+ X87LoadForUsage(reg);
+ __ fldz();
+ __ FCmp();
+ EmitBranch(instr, not_zero);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
@@ -2267,7 +2422,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
}
Register map = no_reg; // Keep the compiler happy.
@@ -2324,7 +2479,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
@@ -2470,7 +2625,10 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
DCHECK(!rep.IsInteger32());
if (rep.IsDouble()) {
- UNREACHABLE();
+ X87Register input = ToX87Register(instr->value());
+ X87LoadForUsage(input);
+ __ FXamMinusZero();
+ EmitBranch(instr, equal);
} else {
Register value = ToRegister(instr->value());
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
@@ -2594,7 +2752,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2671,7 +2829,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2701,7 +2859,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2762,16 +2920,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2866,7 +3024,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2960,28 +3118,36 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(LoadIC::NameRegister(), instr->name());
+ __ mov(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(eax));
- __ mov(LoadIC::SlotRegister(),
- Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2996,7 +3162,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
// Store the value.
@@ -3013,7 +3179,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -3034,7 +3200,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@@ -3047,12 +3213,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register temp = ToRegister(instr->temp());
int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- temp,
- EMIT_REMEMBERED_SET,
- check_needed);
+ __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
}
__ bind(&skip_assignment);
@@ -3108,20 +3270,14 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(LoadIC::NameRegister(), instr->name());
+ __ mov(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(eax));
- __ mov(LoadIC::SlotRegister(),
- Immediate(Smi::FromInt(instr->hydrogen()->slot())));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3137,7 +3293,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3228,7 +3384,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3258,7 +3414,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -3276,20 +3432,18 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// Load the result.
__ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
+ BuildFastArrayOperand(instr->elements(), instr->key(),
instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- instr->base_offset()));
+ FAST_ELEMENTS, instr->base_offset()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a Smi");
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
@@ -3339,20 +3493,14 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ mov(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(eax));
- __ mov(LoadIC::SlotRegister(),
- Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3442,9 +3590,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3470,7 +3618,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "too many arguments");
__ push(receiver);
__ mov(receiver, length);
@@ -3582,6 +3730,32 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+ Register scratch = ebx;
+ Register extra = eax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(name));
+ DCHECK(!extra.is(receiver) && !extra.is(name));
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ leave();
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
@@ -3637,7 +3811,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3684,23 +3858,23 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
LMathAbs* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3709,7 +3883,9 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- UNIMPLEMENTED();
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
+ __ fabs();
} else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
@@ -3725,47 +3901,400 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- UNIMPLEMENTED();
+ Register output_reg = ToRegister(instr->result());
+ X87Register input_reg = ToX87Register(instr->value());
+ X87Fxch(input_reg);
+
+ Label not_minus_zero, done;
+ // Deoptimize on unordered.
+ __ fldz();
+ __ fld(1);
+ __ FCmp();
+ DeoptimizeIf(parity_even, instr, "NaN");
+ __ j(below, &not_minus_zero, Label::kNear);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Check for negative zero.
+ __ j(not_equal, &not_minus_zero, Label::kNear);
+ // +- 0.0.
+ __ fld(0);
+ __ FXamSign();
+ DeoptimizeIf(not_zero, instr, "minus zero");
+ __ Move(output_reg, Immediate(0));
+ __ jmp(&done, Label::kFar);
+ }
+
+ // Positive input.
+ // rc=01B, round down.
+ __ bind(&not_minus_zero);
+ __ fnclex();
+ __ X87SetRC(0x0400);
+ __ sub(esp, Immediate(kPointerSize));
+ __ fist_s(Operand(esp, 0));
+ __ pop(output_reg);
+ __ X87CheckIA();
+ DeoptimizeIf(equal, instr, "overflow");
+ __ fnclex();
+ __ X87SetRC(0x0000);
+ __ bind(&done);
}
void LCodeGen::DoMathRound(LMathRound* instr) {
- UNIMPLEMENTED();
+ X87Register input_reg = ToX87Register(instr->value());
+ Register result = ToRegister(instr->result());
+ X87Fxch(input_reg);
+ Label below_one_half, below_minus_one_half, done;
+
+ ExternalReference one_half = ExternalReference::address_of_one_half();
+ ExternalReference minus_one_half =
+ ExternalReference::address_of_minus_one_half();
+
+ __ fld_d(Operand::StaticVariable(one_half));
+ __ fld(1);
+ __ FCmp();
+ __ j(carry, &below_one_half);
+
+  // Round towards zero: since 0.5 <= x, truncating computes floor(x + 0.5).
+ __ fld(0);
+ __ fadd_d(Operand::StaticVariable(one_half));
+ // rc=11B, round toward zero.
+ __ X87SetRC(0x0c00);
+ __ sub(esp, Immediate(kPointerSize));
+ // Clear exception bits.
+ __ fnclex();
+ __ fistp_s(MemOperand(esp, 0));
+ // Check overflow.
+ __ X87CheckIA();
+ __ pop(result);
+ DeoptimizeIf(equal, instr, "conversion overflow");
+ __ fnclex();
+ // Restore round mode.
+ __ X87SetRC(0x0000);
+ __ jmp(&done);
+
+ __ bind(&below_one_half);
+ __ fld_d(Operand::StaticVariable(minus_one_half));
+ __ fld(1);
+ __ FCmp();
+ __ j(carry, &below_minus_one_half);
+ // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+ // we can ignore the difference between a result of -0 and +0.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If the sign is positive, we return +0.
+ __ fld(0);
+ __ FXamSign();
+ DeoptimizeIf(not_zero, instr, "minus zero");
+ }
+ __ Move(result, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&below_minus_one_half);
+ __ fld(0);
+ __ fadd_d(Operand::StaticVariable(one_half));
+ // rc=01B, round down.
+ __ X87SetRC(0x0400);
+ __ sub(esp, Immediate(kPointerSize));
+ // Clear exception bits.
+ __ fnclex();
+ __ fistp_s(MemOperand(esp, 0));
+ // Check overflow.
+ __ X87CheckIA();
+ __ pop(result);
+ DeoptimizeIf(equal, instr, "conversion overflow");
+ __ fnclex();
+ // Restore round mode.
+ __ X87SetRC(0x0000);
+
+ __ bind(&done);
}
void LCodeGen::DoMathFround(LMathFround* instr) {
- UNIMPLEMENTED();
+ X87Register input_reg = ToX87Register(instr->value());
+ X87Fxch(input_reg);
+ __ sub(esp, Immediate(kPointerSize));
+ __ fstp_s(MemOperand(esp, 0));
+ X87Fld(MemOperand(esp, 0), kX87FloatOperand);
+ __ add(esp, Immediate(kPointerSize));
}
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- UNIMPLEMENTED();
+ X87Register input = ToX87Register(instr->value());
+ X87Register result_reg = ToX87Register(instr->result());
+ Register temp_result = ToRegister(instr->temp1());
+ Register temp = ToRegister(instr->temp2());
+ Label slow, done, smi, finish;
+ DCHECK(result_reg.is(input));
+
+  // Store input into Heap number and call runtime function kMathSqrtRT.
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+ __ jmp(&done, Label::kNear);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(temp_result, Immediate(0));
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(temp_result, eax);
+ }
+ __ bind(&done);
+ X87LoadForUsage(input);
+ __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(temp_result);
+ __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(temp_result, eax);
+ }
+ X87PrepareToWrite(result_reg);
+  // return value of MathSqrtRT is Smi or Heap Number.
+ __ JumpIfSmi(temp_result, &smi);
+ // Heap number(double)
+ __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+ __ jmp(&finish);
+ // SMI
+ __ bind(&smi);
+ __ SmiUntag(temp_result);
+ __ push(temp_result);
+ __ fild_s(MemOperand(esp, 0));
+ __ pop(temp_result);
+ __ bind(&finish);
+ X87CommitWrite(result_reg);
}
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- UNIMPLEMENTED();
+ X87Register input_reg = ToX87Register(instr->value());
+ DCHECK(ToX87Register(instr->result()).is(input_reg));
+ X87Fxch(input_reg);
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
+ __ fxam();
+ __ push(eax);
+ __ fnstsw_ax();
+ __ and_(eax, Immediate(0x4700));
+ __ cmp(eax, Immediate(0x0700));
+ __ j(not_equal, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ fchs();
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
+ __ fldz();
+ __ faddp(); // Convert -0 to +0.
+ __ fsqrt();
+ __ bind(&done);
+ __ pop(eax);
}
void LCodeGen::DoPower(LPower* instr) {
- UNIMPLEMENTED();
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ X87Register result = ToX87Register(instr->result());
+ // Having marked this as a call, we can use any registers.
+ X87Register base = ToX87Register(instr->left());
+ ExternalReference one_half = ExternalReference::address_of_one_half();
+
+ if (exponent_type.IsSmi()) {
+ Register exponent = ToRegister(instr->right());
+ X87LoadForUsage(base);
+ __ SmiUntag(exponent);
+ __ push(exponent);
+ __ fild_s(MemOperand(esp, 0));
+ __ pop(exponent);
+ } else if (exponent_type.IsTagged()) {
+ Register exponent = ToRegister(instr->right());
+ Register temp = exponent.is(ecx) ? eax : ecx;
+ Label no_deopt, done;
+ X87LoadForUsage(base);
+ __ JumpIfSmi(exponent, &no_deopt);
+ __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
+ // Heap number(double)
+ __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ // SMI
+ __ bind(&no_deopt);
+ __ SmiUntag(exponent);
+ __ push(exponent);
+ __ fild_s(MemOperand(esp, 0));
+ __ pop(exponent);
+ __ bind(&done);
+ } else if (exponent_type.IsInteger32()) {
+ Register exponent = ToRegister(instr->right());
+ X87LoadForUsage(base);
+ __ push(exponent);
+ __ fild_s(MemOperand(esp, 0));
+ __ pop(exponent);
+ } else {
+ DCHECK(exponent_type.IsDouble());
+ X87Register exponent_double = ToX87Register(instr->right());
+ X87LoadForUsage(base, exponent_double);
+ }
+
+ // FP data stack {base, exponent(TOS)}.
+ // Handle (exponent==+-0.5 && base == -0).
+ Label not_plus_0;
+ __ fld(0);
+ __ fabs();
+ X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
+ __ FCmp();
+ __ j(parity_even, &not_plus_0, Label::kNear); // NaN.
+ __ j(not_equal, &not_plus_0, Label::kNear);
+ __ fldz();
+ // FP data stack {base, exponent(TOS), zero}.
+ __ faddp(2);
+ __ bind(&not_plus_0);
+
+ {
+ __ PrepareCallCFunction(4, eax);
+ __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value.
+ __ fstp_d(MemOperand(esp, 0)); // Base value.
+ X87PrepareToWrite(result);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 4);
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+ }
}
void LCodeGen::DoMathLog(LMathLog* instr) {
- UNIMPLEMENTED();
+ DCHECK(instr->value()->Equals(instr->result()));
+ X87Register input_reg = ToX87Register(instr->value());
+ X87Fxch(input_reg);
+
+ Label positive, done, zero, nan_result;
+ __ fldz();
+ __ fld(1);
+ __ FCmp();
+ __ j(below, &nan_result, Label::kNear);
+ __ j(equal, &zero, Label::kNear);
+ // Positive input.
+ // {input, ln2}.
+ __ fldln2();
+ // {ln2, input}.
+ __ fxch();
+ // {result}.
+ __ fyl2x();
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&nan_result);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ X87PrepareToWrite(input_reg);
+ __ fld_d(Operand::StaticVariable(nan));
+ X87CommitWrite(input_reg);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&zero);
+ ExternalReference ninf = ExternalReference::address_of_negative_infinity();
+ X87PrepareToWrite(input_reg);
+ __ fld_d(Operand::StaticVariable(ninf));
+ X87CommitWrite(input_reg);
+
+ __ bind(&done);
}
void LCodeGen::DoMathClz32(LMathClz32* instr) {
- UNIMPLEMENTED();
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsr(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Move(result, Immediate(63)); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
void LCodeGen::DoMathExp(LMathExp* instr) {
- UNIMPLEMENTED();
+ X87Register input = ToX87Register(instr->value());
+ X87Register result_reg = ToX87Register(instr->result());
+ Register temp_result = ToRegister(instr->temp1());
+ Register temp = ToRegister(instr->temp2());
+ Label slow, done, smi, finish;
+ DCHECK(result_reg.is(input));
+
+ // Store input into Heap number and call runtime function kMathExpRT.
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+ __ jmp(&done, Label::kNear);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(temp_result, Immediate(0));
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(temp_result, eax);
+ }
+ __ bind(&done);
+ X87LoadForUsage(input);
+ __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(temp_result);
+ __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(temp_result, eax);
+ }
+ X87PrepareToWrite(result_reg);
+ // return value of MathExpRT is Smi or Heap Number.
+ __ JumpIfSmi(temp_result, &smi);
+ // Heap number(double)
+ __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+ __ jmp(&finish);
+ // SMI
+ __ bind(&smi);
+ __ SmiUntag(temp_result);
+ __ push(temp_result);
+ __ fild_s(MemOperand(esp, 0));
+ __ pop(temp_result);
+ __ bind(&finish);
+ X87CommitWrite(result_reg);
}
@@ -3862,7 +4391,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -3933,7 +4462,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(temp_map, transition);
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
- __ RecordWriteForMap(object, transition, temp_map, temp);
+ __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
}
}
@@ -3968,10 +4497,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
- __ RecordWriteField(write_register,
- offset,
- value,
- temp,
+ __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
EMIT_REMEMBERED_SET,
instr->hydrogen()->SmiCheckForWriteBarrier(),
instr->hydrogen()->PointersToHereCheckForValue());
@@ -3981,10 +4507,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ mov(StoreIC::NameRegister(), instr->name());
+ __ mov(StoreDescriptor::NameRegister(), instr->name());
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4010,7 +4536,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
@@ -4031,8 +4557,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
- __ fld(0);
- __ fstp_s(operand);
+ X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
X87Mov(operand, ToX87Register(instr->value()));
@@ -4093,7 +4618,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// This means we should store the (double) hole. No floating point
// registers required.
double nan_double = FixedDoubleArray::hole_nan_as_double();
- uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+ uint64_t int_val = bit_cast<uint64_t, double>(nan_double);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
@@ -4168,10 +4693,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, operand);
- __ RecordWrite(elements,
- key,
- value,
- EMIT_REMEMBERED_SET,
+ __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
check_needed,
instr->hydrogen()->PointersToHereCheckForValue());
}
@@ -4192,13 +4714,12 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4208,7 +4729,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
@@ -4235,7 +4756,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
// Write barrier.
DCHECK_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
- ToRegister(instr->temp()));
+ ToRegister(instr->temp()), kDontSaveFPRegs);
} else {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(object_reg.is(eax));
@@ -4252,16 +4773,16 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen,
LStringCharCodeAt* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4311,16 +4832,16 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen,
LStringCharFromCode* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4400,17 +4921,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen,
LNumberTagI* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4428,17 +4949,17 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen,
LNumberTagU* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4505,7 +5026,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
@@ -4517,16 +5038,16 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen,
LNumberTagD* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4535,7 +5056,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
// Put the value to the top of stack
X87Register src = ToX87Register(instr->value());
- X87LoadForUsage(src);
+ // Don't use X87LoadForUsage here, which is only used by Instruction which
+ // clobbers fp registers.
+ x87_stack_.Fxch(src);
DeferredNumberTagD* deferred =
new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
@@ -4546,7 +5069,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -4564,7 +5087,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
@@ -4577,12 +5100,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "overflow");
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
@@ -4593,7 +5116,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "not a Smi");
} else {
__ AssertSmi(result);
}
@@ -4601,32 +5124,32 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
}
-void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
- Register temp_reg,
- X87Register res_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
+void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
+ Register temp_reg, X87Register res_reg,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label load_smi, done;
X87PrepareToWrite(res_reg);
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+ __ JumpIfSmi(input_reg, &load_smi);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
} else {
Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
+ __ j(equal, &heap_number);
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ bind(&convert);
ExternalReference nan =
@@ -4651,7 +5174,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
// Pop FPU stack before deoptimizing.
__ fstp(0);
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
} else {
@@ -4704,31 +5227,73 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Move(input_reg, Immediate(0));
} else {
- Label bailout;
- __ TaggedToI(input_reg, input_reg,
- instr->hydrogen()->GetMinusZeroMode(), &bailout);
- __ jmp(done);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+ // should first try a fast conversion and then bailout to this slow case.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ DeoptimizeIf(not_equal, instr, "not a heap number");
+
+ __ sub(esp, Immediate(kPointerSize));
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+ if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+ Label no_precision_lost, not_nan, zero_check;
+ __ fld(0);
+
+ __ fist_s(MemOperand(esp, 0));
+ __ fild_s(MemOperand(esp, 0));
+ __ FCmp();
+ __ pop(input_reg);
+
+ __ j(equal, &no_precision_lost, Label::kNear);
+ __ fstp(0);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&no_precision_lost);
+
+ __ j(parity_odd, &not_nan);
+ __ fstp(0);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&not_nan);
+
+ __ test(input_reg, Operand(input_reg));
+ __ j(zero, &zero_check, Label::kNear);
+ __ fstp(0);
+ __ jmp(done);
+
+ __ bind(&zero_check);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ __ sub(esp, Immediate(kPointerSize));
+ __ fstp_s(Operand(esp, 0));
+ __ pop(input_reg);
+ __ test(input_reg, Operand(input_reg));
+ DeoptimizeIf(not_zero, instr, "minus zero");
+ } else {
+ __ fist_s(MemOperand(esp, 0));
+ __ fild_s(MemOperand(esp, 0));
+ __ FCmp();
+ __ pop(input_reg);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
+ }
}
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen,
LTaggedToI* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_, done());
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4764,20 +5329,13 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = ToRegister(temp);
HValue* value = instr->hydrogen()->value();
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagDNoSSE2(input_reg,
- temp_reg,
- ToX87Register(result),
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
+ EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
mode);
}
@@ -4794,14 +5352,19 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
X87Fxch(input_reg);
__ TruncateX87TOSToI(result_reg);
} else {
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
X87Register input_reg = ToX87Register(input);
X87Fxch(input_reg);
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &bailout, Label::kNear);
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ &lost_precision, &is_nan, &minus_zero, dist);
+ __ jmp(&done);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
@@ -4814,25 +5377,29 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
DCHECK(result->IsRegister());
Register result_reg = ToRegister(result);
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
X87Register input_reg = ToX87Register(input);
X87Fxch(input_reg);
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &bailout, Label::kNear);
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ &lost_precision, &is_nan, &minus_zero, dist);
+ __ jmp(&done);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
-
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "not a Smi");
}
@@ -4840,7 +5407,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
}
}
@@ -4861,14 +5428,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
@@ -4876,15 +5443,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
@@ -4900,7 +5467,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
@@ -4909,18 +5476,18 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this);
__ push(object);
__ xor_(esi, esi);
- __ CallRuntime(Runtime::kTryMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen,
LCheckMaps* instr,
@@ -4929,11 +5496,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
: LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -4971,7 +5538,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
@@ -4979,7 +5546,10 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- UNREACHABLE();
+ X87Register value_reg = ToX87Register(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ X87Fxch(value_reg);
+ __ ClampTOSToUint8(result_reg);
}
@@ -5009,7 +5579,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ jmp(&zero_result, Label::kNear);
// Heap number
@@ -5113,26 +5683,46 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- UNREACHABLE();
+ X87Register value_reg = ToX87Register(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ X87Fxch(value_reg);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(Operand(esp, 0));
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ mov(result_reg, Operand(esp, kPointerSize));
+ } else {
+ __ mov(result_reg, Operand(esp, 0));
+ }
+ __ add(esp, Immediate(kDoubleSize));
}
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- UNREACHABLE();
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ X87Register result_reg = ToX87Register(instr->result());
+ // Follow below pattern to write a x87 fp register.
+ X87PrepareToWrite(result_reg);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(Operand(esp, 0), lo_reg);
+ __ mov(Operand(esp, kPointerSize), hi_reg);
+ __ fld_d(Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ X87CommitWrite(result_reg);
}
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen,
LAllocate* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5300,9 +5890,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -5466,8 +6055,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}
@@ -5484,7 +6072,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
DCHECK(instr->HasEnvironment());
@@ -5494,16 +6082,16 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen,
LStackCheck* instr,
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5564,17 +6152,17 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "undefined");
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "null");
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -5589,7 +6177,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
@@ -5612,7 +6200,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "no cache");
}
@@ -5620,7 +6208,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
@@ -5631,7 +6219,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
__ push(object);
__ push(index);
__ xor_(esi, esi);
- __ CallRuntime(Runtime::kLoadMutableDouble);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
RecordSafepointWithRegisters(
instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(object, eax);
@@ -5639,7 +6227,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@@ -5651,10 +6239,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register object_;
diff --git a/deps/v8/src/x87/lithium-codegen-x87.h b/deps/v8/src/x87/lithium-codegen-x87.h
index 327d5398e0..2f4a8d3111 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/x87/lithium-codegen-x87.h
@@ -5,6 +5,7 @@
#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
#define V8_X87_LITHIUM_CODEGEN_X87_H_
+#include <map>
#include "src/x87/lithium-x87.h"
#include "src/base/logging.h"
@@ -84,6 +85,8 @@ class LCodeGen: public LCodeGenBase {
X87OperandType operand = kX87DoubleOperand);
void X87Mov(Operand src, X87Register reg,
X87OperandType operand = kX87DoubleOperand);
+ void X87Mov(X87Register reg, X87Register src,
+ X87OperandType operand = kX87DoubleOperand);
void X87PrepareBinaryOp(
X87Register left, X87Register right, X87Register result);
@@ -174,8 +177,8 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
- void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
- void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@@ -198,9 +201,8 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
SafepointMode safepoint_mode);
- void CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr);
+ void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
@@ -234,10 +236,9 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -284,7 +285,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -294,14 +295,9 @@ class LCodeGen: public LCodeGenBase {
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagDNoSSE2(
- Register input,
- Register temp,
- X87Register res_reg,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input,
+ Register temp, X87Register res_reg,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -336,7 +332,7 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -344,6 +340,9 @@ class LCodeGen: public LCodeGenBase {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
// Emits code for pushing either a tagged constant, a (non-double)
@@ -378,7 +377,7 @@ class LCodeGen: public LCodeGenBase {
int osr_pc_offset_;
bool frame_is_built_;
- class X87Stack {
+ class X87Stack : public ZoneObject {
public:
explicit X87Stack(MacroAssembler* masm)
: stack_depth_(0), is_mutable_(true), masm_(masm) { }
@@ -395,14 +394,23 @@ class LCodeGen: public LCodeGenBase {
}
return true;
}
+ X87Stack& operator=(const X87Stack& other) {
+ stack_depth_ = other.stack_depth_;
+ for (int i = 0; i < stack_depth_; i++) {
+ stack_[i] = other.stack_[i];
+ }
+ return *this;
+ }
bool Contains(X87Register reg);
void Fxch(X87Register reg, int other_slot = 0);
void Free(X87Register reg);
void PrepareToWrite(X87Register reg);
void CommitWrite(X87Register reg);
void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
- void LeavingBlock(int current_block_id, LGoto* goto_instr);
+ void LeavingBlock(int current_block_id, LGoto* goto_instr, LCodeGen* cgen);
int depth() const { return stack_depth_; }
+ int GetLayout();
+ int st(X87Register reg) { return st2idx(ArrayIndex(reg)); }
void pop() {
DCHECK(is_mutable_);
stack_depth_--;
@@ -427,6 +435,9 @@ class LCodeGen: public LCodeGenBase {
MacroAssembler* masm_;
};
X87Stack x87_stack_;
+ // block_id -> X87Stack*;
+ typedef std::map<int, X87Stack*> X87StackMap;
+ X87StackMap x87_stack_map_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -437,7 +448,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
@@ -460,6 +471,7 @@ class LCodeGen: public LCodeGenBase {
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
+ friend class X87Stack;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.cc b/deps/v8/src/x87/lithium-gap-resolver-x87.cc
index e25c78c993..6a6427550c 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.cc
+++ b/deps/v8/src/x87/lithium-gap-resolver-x87.cc
@@ -292,7 +292,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
__ push(Immediate(upper));
@@ -317,10 +317,15 @@ void LGapResolver::EmitMove(int index) {
} else if (source->IsDoubleRegister()) {
// load from the register onto the stack, store in destination, which must
// be a double stack slot in the non-SSE2 case.
- DCHECK(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- X87Register src = cgen_->ToX87Register(source);
- cgen_->X87Mov(dst, src);
+ if (destination->IsDoubleStackSlot()) {
+ Operand dst = cgen_->ToOperand(destination);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
+ } else {
+ X87Register dst = cgen_->ToX87Register(destination);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
+ }
} else if (source->IsDoubleStackSlot()) {
// load from the stack slot on top of the floating point stack, and then
// store in destination. If destination is a double register, then it
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.h b/deps/v8/src/x87/lithium-gap-resolver-x87.h
index 737660c71a..4d1496b4fa 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.h
+++ b/deps/v8/src/x87/lithium-gap-resolver-x87.h
@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/x87/lithium-x87.cc
index f2eb9f0a00..9304b8975c 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/x87/lithium-x87.cc
@@ -472,18 +472,18 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
}
+LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
+ return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ X87Register::ToAllocationIndex(reg));
+}
+
+
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
@@ -616,6 +616,12 @@ LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
}
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+ X87Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
@@ -872,6 +878,14 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->IsControlInstruction() &&
HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
successor != NULL) {
+ // Always insert a fpu register barrier here when branch is optimized to
+ // be a direct goto.
+ // TODO(weiliang): require a better solution.
+ if (!current->IsGoto()) {
+ LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate());
+ clobber->set_hydrogen_value(current);
+ chunk_->AddInstruction(clobber, current_block_);
+ }
instr = new(zone()) LGoto(successor);
} else {
instr = current->CompileToLithium(this);
@@ -931,7 +945,8 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) {
+ if (instr->IsGoto() &&
+ (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) {
// TODO(olivf) Since phis of spilled values are joined as registers
// (not in the stack slot), we need to allow the goto gaps to keep one
// x87 register alive. To ensure all other values are still spilled, we
@@ -979,7 +994,9 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
- LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+ LInstruction* branch =
+ temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
+ : new (zone()) LBranch(UseRegisterAtStart(value), temp);
if (!easy_case &&
((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
@@ -1119,13 +1136,13 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const InterfaceDescriptor* descriptor = instr->descriptor();
+ CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone());
}
@@ -1135,6 +1152,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
@@ -1169,16 +1199,16 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- // Crankshaft is turned off for nosse2.
- UNREACHABLE();
- return NULL;
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input));
+ return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* input = UseRegister(instr->value());
LMathFround* result = new (zone()) LMathFround(input);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineSameAsFirst(result);
}
@@ -1212,25 +1242,26 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp1 = FixedTemp(ecx);
+ LOperand* temp2 = FixedTemp(edx);
LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
}
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineSameAsFirst(result);
+ LOperand* temp1 = FixedTemp(ecx);
+ LOperand* temp2 = FixedTemp(edx);
+ LMathSqrt* result = new(zone()) LMathSqrt(input, temp1, temp2);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
}
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ LMathPowHalf* result = new (zone()) LMathPowHalf(input);
return DefineSameAsFirst(result);
}
@@ -1602,6 +1633,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
+ LOperand* scratch = TempRegister();
+
if (instr->representation().IsSmiOrInteger32()) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
@@ -1614,15 +1647,19 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch);
return DefineSameAsFirst(minmax);
}
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- // Crankshaft is turned off for nosse2.
- UNREACHABLE();
- return NULL;
+ // Unlike ia32, we don't have a MathPowStub and directly call c function.
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LPower* result = new (zone()) LPower(left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
}
@@ -1684,9 +1721,8 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LCompareMinusZeroAndBranch(value);
}
@@ -2009,8 +2045,8 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
HValue* value = instr->value();
Representation input_rep = value->representation();
if (input_rep.IsDouble()) {
- UNREACHABLE();
- return NULL;
+ LOperand* reg = UseRegister(value);
+ return DefineFixed(new (zone()) LClampDToUint8(reg), eax);
} else if (input_rep.IsInteger32()) {
LOperand* reg = UseFixed(value, eax);
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
@@ -2054,10 +2090,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
- double value = instr->DoubleValue();
- bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
- LOperand* temp = value_is_zero ? NULL : TempRegister();
- return DefineAsRegister(new(zone()) LConstantD(temp));
+ return DefineAsRegister(new (zone()) LConstantD);
} else if (r.IsExternal()) {
return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
@@ -2079,11 +2112,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(),
- LoadIC::ReceiverRegister());
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2140,10 +2173,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
@@ -2203,11 +2237,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
- vector = FixedTemp(LoadIC::VectorRegister());
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
@@ -2246,8 +2281,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
LOperand* object = UseRegisterAtStart(instr->elements());
- LOperand* val = NULL;
- val = UseRegisterAtStart(instr->value());
+ // For storing double hole, no fp register required.
+ LOperand* val = instr->IsConstantHoleStore()
+ ? NULL
+ : UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyed(object, key, val);
} else {
@@ -2292,10 +2329,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(),
- KeyedStoreIC::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
- LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged());
@@ -2375,8 +2412,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (instr->field_representation().IsSmi()) {
- val = UseTempRegister(instr->value());
} else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
@@ -2397,8 +2432,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister());
+ LOperand* object =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LStoreNamedGeneric* result =
new(zone()) LStoreNamedGeneric(context, object, value);
@@ -2475,10 +2511,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2494,7 +2530,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else {
spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kNotEnoughSpillSlotsForOsr);
+ Retry(kNotEnoughSpillSlotsForOsr);
spill_index = 0;
}
if (spill_index == 0) {
@@ -2606,6 +2642,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
}
+ inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/x87/lithium-x87.h
index 56d7c640ff..dbb18ecabb 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/x87/lithium-x87.h
@@ -21,158 +21,159 @@ class RCodeVisualizer;
// Forward declarations.
class LCodeGen;
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(AllocateBlockContext) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallJSFunction) \
- V(CallWithDescriptor) \
- V(CallFunction) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8NoSSE2) \
- V(ClassOfTestAndBranch) \
- V(ClobberDoubles) \
- V(CompareMinusZeroAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(ConstructDouble) \
- V(Context) \
- V(DateField) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleBits) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedGeneric) \
- V(LoadRoot) \
- V(MapEnumLength) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathExp) \
- V(MathFloor) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreFrameContext) \
- V(StoreGlobalCell) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(AllocateBlockContext) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckInstanceType) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8NoSSE2) \
+ V(ClassOfTestAndBranch) \
+ V(ClobberDoubles) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFround) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
+ virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
@@ -296,7 +297,7 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
void set_result(LOperand* operand) { results_[0] = operand; }
@@ -318,11 +319,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+ virtual int TempCount() FINAL OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
};
@@ -336,8 +337,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsGap() const FINAL OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -373,11 +374,11 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@@ -385,11 +386,11 @@ class LInstructionGap V8_FINAL : public LGap {
};
-class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LClobberDoubles FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LClobberDoubles(Isolate* isolate) { }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return true;
}
@@ -397,41 +398,42 @@ class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+ virtual bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return false;
}
bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+ HBasicBlock* block() const { return block_; }
private:
HBasicBlock* block_;
};
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
};
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
public:
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -440,25 +442,25 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool IsControl() const V8_OVERRIDE { return true; }
+ virtual bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -474,16 +476,16 @@ class LLabel V8_FINAL : public LGap {
};
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -496,9 +498,30 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -510,7 +533,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual bool IsControl() const FINAL OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -549,7 +572,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LWrapReceiver(LOperand* receiver,
LOperand* function,
@@ -568,7 +591,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -589,7 +612,7 @@ class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -603,11 +626,11 @@ class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -619,20 +642,20 @@ class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -650,7 +673,7 @@ class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LModByConstI(LOperand* dividend,
int32_t divisor,
@@ -675,7 +698,7 @@ class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -692,7 +715,7 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -710,7 +733,7 @@ class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LDivByConstI(LOperand* dividend,
int32_t divisor,
@@ -735,7 +758,7 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -752,7 +775,7 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
inputs_[0] = dividend;
@@ -771,7 +794,7 @@ class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LFlooringDivByConstI(LOperand* dividend,
int32_t divisor,
@@ -799,7 +822,7 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
@@ -816,7 +839,7 @@ class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -833,7 +856,7 @@ class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -856,7 +879,7 @@ class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -869,7 +892,7 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathRound(LOperand* value) {
inputs_[0] = value;
@@ -882,7 +905,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
@@ -892,7 +915,7 @@ class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -907,7 +930,7 @@ class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -919,7 +942,7 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
@@ -931,7 +954,7 @@ class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
LOperand* temp1,
@@ -950,33 +973,35 @@ class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LMathSqrt(LOperand* value) {
+ explicit LMathSqrt(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathPowHalf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
+ explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -990,7 +1015,7 @@ class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -1003,15 +1028,11 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 0> {
public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
+ explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
"cmp-minus-zero-and-branch")
@@ -1019,7 +1040,7 @@ class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1031,11 +1052,11 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1048,11 +1069,11 @@ class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1063,11 +1084,11 @@ class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1081,11 +1102,11 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1101,13 +1122,13 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1121,11 +1142,11 @@ class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1138,7 +1159,7 @@ class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1150,11 +1171,11 @@ class LHasCachedArrayIndexAndBranch V8_FINAL
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -1167,7 +1188,7 @@ class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
};
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1183,11 +1204,11 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1203,7 +1224,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1217,7 +1238,7 @@ class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1238,7 +1259,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
+ LEnvironment* env) OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1247,7 +1268,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1262,7 +1283,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1279,7 +1300,7 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1301,7 +1322,7 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1316,7 +1337,7 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1325,7 +1346,7 @@ class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1334,14 +1355,8 @@ class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1349,7 +1364,7 @@ class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1360,7 +1375,7 @@ class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1371,7 +1386,7 @@ class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LBranch FINAL : public LControlInstruction<1, 1> {
public:
LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1384,11 +1399,11 @@ class LBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1403,7 +1418,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1415,7 +1430,7 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
: index_(index) {
@@ -1436,7 +1451,7 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -1451,7 +1466,7 @@ class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LSeqStringSetChar(LOperand* context,
LOperand* string,
@@ -1472,7 +1487,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1492,22 +1507,24 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LMathMinMax(LOperand* left, LOperand* right) {
+ LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
+ temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1522,7 +1539,7 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1535,18 +1552,18 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1562,11 +1579,11 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
- virtual Opcode opcode() const V8_OVERRIDE {
+ virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
Token::Value op() const { return op_; }
@@ -1575,7 +1592,7 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value,
LOperand* context,
@@ -1599,7 +1616,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1612,7 +1629,7 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
@@ -1631,7 +1648,7 @@ class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1646,7 +1663,7 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1655,7 +1672,7 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1679,7 +1696,7 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
@@ -1703,7 +1720,7 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
LOperand* vector) {
@@ -1723,14 +1740,14 @@ class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
@@ -1751,7 +1768,7 @@ class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
@@ -1764,7 +1781,7 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1777,11 +1794,11 @@ class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1798,11 +1815,11 @@ class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1814,7 +1831,7 @@ class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1827,7 +1844,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
@@ -1844,7 +1861,7 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
public:
LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
@@ -1860,21 +1877,21 @@ class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
};
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1887,7 +1904,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
@@ -1898,44 +1915,44 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const InterfaceDescriptor* descriptor,
- const ZoneList<LOperand*>& operands,
- Zone* zone)
- : inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
ZoneList<LOperand*> inputs_;
// Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+ virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+ virtual int TempCount() FINAL OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1948,13 +1965,13 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1971,7 +1988,7 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1984,13 +2001,13 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -2003,13 +2020,13 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -2020,16 +2037,17 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
- return true;
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
}
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2041,7 +2059,7 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2053,7 +2071,7 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2067,7 +2085,7 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2081,7 +2099,7 @@ class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2097,7 +2115,7 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -2112,7 +2130,7 @@ class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2126,7 +2144,7 @@ class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTaggedToI(LOperand* value) {
inputs_[0] = value;
@@ -2141,7 +2159,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2154,7 +2172,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2169,7 +2187,7 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2187,7 +2205,7 @@ class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* obj,
LOperand* val,
@@ -2207,11 +2225,11 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2226,13 +2244,13 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
@@ -2257,13 +2275,13 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
@@ -2283,13 +2301,13 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2310,7 +2328,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@@ -2321,7 +2339,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2337,7 +2355,7 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2354,7 +2372,7 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2371,7 +2389,7 @@ class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2386,7 +2404,7 @@ class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
@@ -2399,7 +2417,7 @@ class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2414,7 +2432,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
@@ -2427,7 +2445,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2439,7 +2457,7 @@ class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2451,7 +2469,7 @@ class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2464,7 +2482,7 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LClampTToUint8NoSSE2 FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LClampTToUint8NoSSE2(LOperand* unclamped,
LOperand* temp1,
@@ -2487,7 +2505,7 @@ class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
};
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2500,7 +2518,7 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleBits(LOperand* value) {
inputs_[0] = value;
@@ -2513,7 +2531,7 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
@@ -2527,7 +2545,7 @@ class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
@@ -2544,7 +2562,7 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2557,7 +2575,7 @@ class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2570,7 +2588,7 @@ class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2583,7 +2601,7 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2597,7 +2615,7 @@ class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2610,20 +2628,20 @@ class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2641,7 +2659,7 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2655,7 +2673,7 @@ class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2671,7 +2689,7 @@ class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2685,7 +2703,7 @@ class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2729,7 +2747,7 @@ class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph),
@@ -2745,20 +2763,14 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
+ : LChunkBuilderBase(info, graph),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator) { }
-
- Isolate* isolate() const { return graph_->isolate(); }
+ allocator_(allocator) {}
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2788,24 +2800,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
-
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(X87Register reg);
@@ -2849,7 +2843,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2865,6 +2859,8 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ X87Register reg);
LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
@@ -2897,10 +2893,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 6196d8f7ae..90ae7d3db2 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -6,12 +6,14 @@
#if V8_TARGET_ARCH_X87
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/serialize.h"
namespace v8 {
@@ -146,8 +148,7 @@ void MacroAssembler::InNewSpace(
void MacroAssembler::RememberedSetHelper(
Register object, // Only used for debug checks.
- Register addr,
- Register scratch,
+ Register addr, Register scratch, SaveFPRegsMode save_fp,
MacroAssembler::RememberedSetFinalAction and_then) {
Label done;
if (emit_debug_code()) {
@@ -178,8 +179,7 @@ void MacroAssembler::RememberedSetHelper(
DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(isolate());
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
@@ -190,6 +190,31 @@ void MacroAssembler::RememberedSetHelper(
}
+void MacroAssembler::ClampTOSToUint8(Register result_reg) {
+ Label done, conv_failure;
+ sub(esp, Immediate(kPointerSize));
+ fnclex();
+ fist_s(Operand(esp, 0));
+ pop(result_reg);
+ X87CheckIA();
+ j(equal, &conv_failure, Label::kNear);
+ test(result_reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ setcc(sign, result_reg);
+ sub(result_reg, Immediate(1));
+ and_(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
+ fnclex();
+ fldz();
+ fld(1);
+ FCmp();
+ setcc(below, result_reg); // 1 if negative, 0 if positive.
+ dec_b(result_reg); // 0 if negative, 255 if positive.
+ bind(&done);
+}
+
+
void MacroAssembler::ClampUint8(Register reg) {
Label done;
test(reg, Immediate(0xFFFFFF00));
@@ -218,8 +243,8 @@ void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
void MacroAssembler::X87TOSToI(Register result_reg,
MinusZeroMode minus_zero_mode,
- Label* conversion_failed,
- Label::Distance dst) {
+ Label* lost_precision, Label* is_nan,
+ Label* minus_zero, Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
fld(0);
@@ -227,8 +252,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
- j(not_equal, conversion_failed, dst);
- j(parity_even, conversion_failed, dst);
+ j(not_equal, lost_precision, dst);
+ j(parity_even, is_nan, dst);
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
test(result_reg, Operand(result_reg));
j(not_zero, &done, Label::kNear);
@@ -238,7 +263,7 @@ void MacroAssembler::X87TOSToI(Register result_reg,
fst_s(MemOperand(esp, 0));
pop(result_reg);
test(result_reg, Operand(result_reg));
- j(not_zero, conversion_failed, dst);
+ j(not_zero, minus_zero, dst);
}
bind(&done);
}
@@ -253,53 +278,6 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
-void MacroAssembler::TaggedToI(Register result_reg,
- Register input_reg,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision) {
- Label done;
-
- cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- j(not_equal, lost_precision, Label::kNear);
-
- // TODO(olivf) Converting a number on the fpu is actually quite slow. We
- // should first try a fast conversion and then bailout to this slow case.
- Label lost_precision_pop, zero_check;
- Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
- ? &lost_precision_pop : lost_precision;
- sub(esp, Immediate(kPointerSize));
- fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
- fist_s(MemOperand(esp, 0));
- fild_s(MemOperand(esp, 0));
- FCmp();
- pop(result_reg);
- j(not_equal, lost_precision_int, Label::kNear);
- j(parity_even, lost_precision_int, Label::kNear); // NaN.
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- test(result_reg, Operand(result_reg));
- j(zero, &zero_check, Label::kNear);
- fstp(0);
- jmp(&done, Label::kNear);
- bind(&zero_check);
- // To check for minus zero, we load the value again as float, and check
- // if that is still 0.
- sub(esp, Immediate(kPointerSize));
- fstp_s(Operand(esp, 0));
- pop(result_reg);
- test(result_reg, Operand(result_reg));
- j(zero, &done, Label::kNear);
- jmp(lost_precision, Label::kNear);
-
- bind(&lost_precision_pop);
- fstp(0);
- jmp(lost_precision, Label::kNear);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadUint32NoSSE2(Register src) {
Label done;
push(src);
@@ -316,11 +294,8 @@ void MacroAssembler::LoadUint32NoSSE2(Register src) {
void MacroAssembler::RecordWriteArray(
- Register object,
- Register value,
- Register index,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
+ Register object, Register value, Register index, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action, SmiCheck smi_check,
PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -340,28 +315,24 @@ void MacroAssembler::RecordWriteArray(
lea(dst, Operand(object, index, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+ Register object, int offset, Register value, Register dst,
+ SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action,
+ SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -384,25 +355,23 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
-void MacroAssembler::RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2) {
+void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
+ Register scratch1, Register scratch2,
+ SaveFPRegsMode save_fp) {
Label done;
Register address = scratch1;
@@ -439,7 +408,8 @@ void MacroAssembler::RecordWriteForMap(
&done,
Label::kNear);
- RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET);
+ RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
+ save_fp);
CallStub(&stub);
bind(&done);
@@ -451,19 +421,16 @@ void MacroAssembler::RecordWriteForMap(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
+ Register object, Register address, Register value, SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action, SmiCheck smi_check,
PointersToHereCheck pointers_to_here_check_for_value) {
DCHECK(!object.is(value));
DCHECK(!object.is(address));
@@ -507,8 +474,8 @@ void MacroAssembler::RecordWrite(
&done,
Label::kNear);
- RecordWriteStub stub(isolate(), object, value, address,
- remembered_set_action);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
@@ -520,8 +487,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(address, Immediate(BitCast<int32_t>(kZapValue)));
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -753,6 +720,53 @@ void MacroAssembler::FCmp() {
}
+void MacroAssembler::FXamMinusZero() {
+ fxam();
+ push(eax);
+ fnstsw_ax();
+ and_(eax, Immediate(0x4700));
+ // For minus zero, C3 == 1 && C1 == 1.
+ cmp(eax, Immediate(0x4200));
+ pop(eax);
+ fstp(0);
+}
+
+
+void MacroAssembler::FXamSign() {
+ fxam();
+ push(eax);
+ fnstsw_ax();
+ // For negative value (including -0.0), C1 == 1.
+ and_(eax, Immediate(0x0200));
+ pop(eax);
+ fstp(0);
+}
+
+
+void MacroAssembler::X87CheckIA() {
+ push(eax);
+ fnstsw_ax();
+ // For #IA, IE == 1 && SF == 0.
+ and_(eax, Immediate(0x0041));
+ cmp(eax, Immediate(0x0001));
+ pop(eax);
+}
+
+
+// rc=00B, round to nearest.
+// rc=01B, round down.
+// rc=10B, round up.
+// rc=11B, round toward zero.
+void MacroAssembler::X87SetRC(int rc) {
+ sub(esp, Immediate(kPointerSize));
+ fnstcw(MemOperand(esp, 0));
+ and_(MemOperand(esp, 0), Immediate(0xF3FF));
+ or_(MemOperand(esp, 0), Immediate(rc));
+ fldcw(MemOperand(esp, 0));
+ add(esp, Immediate(kPointerSize));
+}
+
+
void MacroAssembler::AssertNumber(Register object) {
if (emit_debug_code()) {
Label ok;
@@ -890,13 +904,22 @@ void MacroAssembler::EnterExitFramePrologue() {
}
-void MacroAssembler::EnterExitFrameEpilogue(int argc) {
- sub(esp, Immediate(argc * kPointerSize));
+void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+ // Optionally save FPU state.
+ if (save_doubles) {
+ // Store FPU state to m108byte.
+ int space = 108 + argc * kPointerSize;
+ sub(esp, Immediate(space));
+ const int offset = -2 * kPointerSize; // entry fp + code object.
+ fnsave(MemOperand(ebp, offset - 108));
+ } else {
+ sub(esp, Immediate(argc * kPointerSize));
+ }
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- DCHECK(IsPowerOf2(kFrameAlignment));
+ DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
and_(esp, -kFrameAlignment);
}
@@ -905,7 +928,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc) {
}
-void MacroAssembler::EnterExitFrame() {
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
EnterExitFramePrologue();
// Set up argc and argv in callee-saved registers.
@@ -914,17 +937,23 @@ void MacroAssembler::EnterExitFrame() {
lea(esi, Operand(ebp, eax, times_4, offset));
// Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(3);
+ EnterExitFrameEpilogue(3, save_doubles);
}
void MacroAssembler::EnterApiExitFrame(int argc) {
EnterExitFramePrologue();
- EnterExitFrameEpilogue(argc);
+ EnterExitFrameEpilogue(argc, false);
}
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+ // Optionally restore FPU state.
+ if (save_doubles) {
+ const int offset = -2 * kPointerSize;
+ frstor(MemOperand(ebp, offset - 108));
+ }
+
// Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize));
mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1627,12 +1656,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -1641,7 +1668,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
@@ -1654,7 +1681,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
+ Immediate(isolate()->factory()->one_byte_string_map()));
mov(scratch1, length);
SmiTag(scratch1);
mov(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -1663,20 +1690,18 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
-void MacroAssembler::AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, int length,
+ Register scratch1, Register scratch2,
+ Label* gc_required) {
DCHECK(length > 0);
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
gc_required, TAG_OBJECT);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
+ Immediate(isolate()->factory()->one_byte_string_map()));
mov(FieldOperand(result, String::kLengthOffset),
Immediate(Smi::FromInt(length)));
mov(FieldOperand(result, String::kHashFieldOffset),
@@ -1698,10 +1723,10 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
@@ -1711,7 +1736,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_ascii_string_map()));
+ Immediate(isolate()->factory()->cons_one_byte_string_map()));
}
@@ -1729,17 +1754,17 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_ascii_string_map()));
+ Immediate(isolate()->factory()->sliced_one_byte_string_map()));
}
@@ -1828,7 +1853,7 @@ void MacroAssembler::BooleanBitTest(Register object,
int field_offset,
int bit_index) {
bit_index += kSmiTagSize + kSmiShiftSize;
- DCHECK(IsPowerOf2(kBitsPerByte));
+ DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
test_b(FieldOperand(object, field_offset + byte_index),
@@ -1958,8 +1983,8 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -1971,7 +1996,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Move(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(isolate(), 1);
+ CEntryStub ces(isolate(), 1, save_doubles);
CallStub(&ces);
}
@@ -2015,7 +2040,7 @@ Operand ApiParameterOperand(int index) {
void MacroAssembler::PrepareCallApiFunction(int argc) {
EnterApiExitFrame(argc);
if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -2521,6 +2546,9 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
+ // Turn off the stack depth check when serializer is enabled to reduce the
+ // code size.
+ if (serializer_enabled()) return;
// Make sure the floating point stack is either empty or has depth items.
DCHECK(depth <= 7);
// This is very expensive.
@@ -2669,7 +2697,7 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
@@ -2797,10 +2825,8 @@ void MacroAssembler::LookupNumberStringCache(Register object,
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure) {
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch, Label* failure) {
if (!scratch.is(instance_type)) {
mov(scratch, instance_type);
}
@@ -2811,11 +2837,11 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that both objects are not smis.
STATIC_ASSERT(kSmiTag == 0);
mov(scratch1, object1);
@@ -2828,24 +2854,24 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ASCII strings.
- const int kFlatAsciiStringMask =
+ // Check that both are flat one-byte strings.
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
- DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- and_(scratch1, kFlatAsciiStringMask);
- and_(scratch2, kFlatAsciiStringMask);
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+ and_(scratch1, kFlatOneByteStringMask);
+ and_(scratch2, kFlatOneByteStringMask);
lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
j(not_equal, failure);
}
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
- Label* not_unique_name,
- Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -2899,7 +2925,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// and the original value of esp.
mov(scratch, esp);
sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- DCHECK(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
@@ -3177,12 +3203,12 @@ void MacroAssembler::EnsureNotWhite(
jmp(&is_data_object, Label::kNear);
bind(&not_external);
- // Sequential string, either ASCII or UC16.
+ // Sequential string, either Latin1 or UC16.
DCHECK(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
add(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+ // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
@@ -3310,12 +3336,14 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(eax));
DCHECK(!dividend.is(edx));
- MultiplierAndShift ms(divisor);
- mov(eax, Immediate(ms.multiplier()));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ mov(eax, Immediate(mag.multiplier));
imul(dividend);
- if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
- if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
- if (ms.shift() > 0) sar(edx, ms.shift());
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) add(edx, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
+ if (mag.shift > 0) sar(edx, mag.shift);
mov(eax, dividend);
shr(eax, 31);
add(edx, eax);
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 743bebdfe7..ed0b7c1745 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -6,6 +6,7 @@
#define V8_X87_MACRO_ASSEMBLER_X87_H_
#include "src/assembler.h"
+#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
@@ -74,8 +75,8 @@ class MacroAssembler: public Assembler {
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
@@ -146,10 +147,8 @@ class MacroAssembler: public Assembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
+ Register object, int offset, Register value, Register scratch,
+ SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -158,20 +157,14 @@ class MacroAssembler: public Assembler {
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
+ Register context, int offset, Register value, Register scratch,
+ SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- remembered_set_action,
- smi_check,
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
+ remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
@@ -182,9 +175,7 @@ class MacroAssembler: public Assembler {
// filters out smis so it does not update the write barrier if the
// value is a smi.
void RecordWriteArray(
- Register array,
- Register value,
- Register index,
+ Register array, Register value, Register index, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -196,9 +187,7 @@ class MacroAssembler: public Assembler {
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(
- Register object,
- Register address,
- Register value,
+ Register object, Register address, Register value, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -207,11 +196,8 @@ class MacroAssembler: public Assembler {
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
- void RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2);
+ void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
+ Register scratch2, SaveFPRegsMode save_fp);
// ---------------------------------------------------------------------------
// Debugger Support
@@ -226,14 +212,14 @@ class MacroAssembler: public Assembler {
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
- void EnterExitFrame();
+ void EnterExitFrame(bool save_doubles);
void EnterApiExitFrame(int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame();
+ void LeaveExitFrame(bool save_doubles);
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
@@ -435,8 +421,13 @@ class MacroAssembler: public Assembler {
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
+ void FXamMinusZero();
+ void FXamSign();
+ void X87CheckIA();
+ void X87SetRC(int rc);
void ClampUint8(Register reg);
+ void ClampTOSToUint8(Register result_reg);
void SlowTruncateToI(Register result_reg, Register input_reg,
int offset = HeapNumber::kValueOffset - kHeapObjectTag);
@@ -445,10 +436,8 @@ class MacroAssembler: public Assembler {
void TruncateX87TOSToI(Register result_reg);
void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
- Label* conversion_failed, Label::Distance dst = Label::kFar);
-
- void TaggedToI(Register result_reg, Register input_reg,
- MinusZeroMode minus_zero_mode, Label* lost_precision);
+ Label* lost_precision, Label* is_nan, Label* minus_zero,
+ Label::Distance dst = Label::kFar);
// Smi tagging support.
void SmiTag(Register reg) {
@@ -637,17 +626,11 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3,
Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
+ void AllocateOneByteString(Register result, int length, Register scratch1,
+ Register scratch2, Label* gc_required);
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
@@ -655,10 +638,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
@@ -666,10 +647,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
@@ -730,14 +709,17 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id) {
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
}
- void CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
@@ -888,29 +870,27 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* not_found);
- // Check whether the instance type represents a flat ASCII string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label* on_not_flat_ascii_string);
+ // Check whether the instance type represents a flat one-byte string. Jump to
+ // the label if not. If the instance type can be scratched specify same
+ // register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch,
+ Label* on_not_flat_one_byte_string);
- // Checks if both objects are sequential ASCII strings, and jumps to label
+ // Checks if both objects are sequential one-byte strings, and jumps to label
// if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* on_not_flat_ascii_strings);
+ void JumpIfNotBothSequentialOneByteStrings(
+ Register object1, Register object2, Register scratch1, Register scratch2,
+ Label* on_not_flat_one_byte_strings);
// Checks if the given register or operand is a unique name
- void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
- Label::Distance distance = Label::kFar) {
- JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar) {
+ JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
}
- void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
+ void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
void EmitSeqStringSetCharCheck(Register string,
Register index,
@@ -971,7 +951,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper = NullCallWrapper());
void EnterExitFramePrologue();
- void EnterExitFrameEpilogue(int argc);
+ void EnterExitFrameEpilogue(int argc, bool save_doubles);
void LeaveExitFrameEpilogue(bool restore_context);
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/x87/regexp-macro-assembler-x87.cc
index 54dd52f23a..9bd08caa2a 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/x87/regexp-macro-assembler-x87.cc
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
__ add(eax, ebx);
BranchOrBacktrack(greater, on_no_match);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
Label success;
Label fail;
Label loop_increment;
@@ -365,7 +365,7 @@ void RegExpMacroAssemblerX87::CheckNotBackReference(
Label loop;
__ bind(&loop);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
__ movzx_b(eax, Operand(edx, 0));
__ cmpb_al(Operand(ebx, 0));
} else {
@@ -475,7 +475,7 @@ void RegExpMacroAssemblerX87::CheckBitInTable(
Label* on_bit_set) {
__ mov(eax, Immediate(table));
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ mov(ebx, kTableSize - 1);
__ and_(ebx, current_character());
index = ebx;
@@ -492,7 +492,7 @@ bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
switch (type) {
case 's':
// Match space-characters
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ cmp(current_character(), ' ');
@@ -542,8 +542,8 @@ bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
@@ -556,8 +556,8 @@ bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
}
case 'W': {
Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Immediate('z'));
__ j(above, &done);
}
@@ -566,7 +566,7 @@ bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
__ test_b(current_character(),
Operand::StaticArray(current_character(), times_1, word_map));
BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
+ if (mode_ != LATIN1) {
__ bind(&done);
}
return true;
@@ -583,7 +583,7 @@ bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(eax, Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
Label done;
@@ -1098,7 +1098,7 @@ int RegExpMacroAssemblerX87::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
@@ -1129,8 +1129,8 @@ int RegExpMacroAssemblerX87::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and an UC16 string, the specialized
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between an LATIN1 and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
@@ -1277,7 +1277,7 @@ void RegExpMacroAssemblerX87::CheckStackLimit() {
void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
- if (mode_ == ASCII) {
+ if (mode_ == LATIN1) {
if (characters == 4) {
__ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
} else if (characters == 2) {
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.h b/deps/v8/src/x87/regexp-macro-assembler-x87.h
index 3c98dfff67..3655bd9626 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.h
+++ b/deps/v8/src/x87/regexp-macro-assembler-x87.h
@@ -174,7 +174,7 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
MacroAssembler* masm_;
- // Which mode to generate code for (ASCII or UC16).
+ // Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
diff --git a/deps/v8/src/zone-containers.h b/deps/v8/src/zone-containers.h
index 1295ed7ab9..2ee178029f 100644
--- a/deps/v8/src/zone-containers.h
+++ b/deps/v8/src/zone-containers.h
@@ -5,6 +5,8 @@
#ifndef V8_ZONE_CONTAINERS_H_
#define V8_ZONE_CONTAINERS_H_
+#include <deque>
+#include <queue>
#include <vector>
#include "src/zone-allocator.h"
@@ -12,12 +14,45 @@
namespace v8 {
namespace internal {
-typedef std::vector<bool, ZoneBoolAllocator> BoolVector;
-
-typedef std::vector<int, ZoneIntAllocator> IntVector;
-typedef IntVector::iterator IntVectorIter;
-typedef IntVector::reverse_iterator IntVectorRIter;
-
+// A wrapper subclass for std::vector to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneVector : public std::vector<T, zone_allocator<T> > {
+ public:
+ // Constructs an empty vector.
+ explicit ZoneVector(Zone* zone)
+ : std::vector<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
+
+ // Constructs a new vector and fills it with {size} elements, each
+ // having the value {def}.
+ ZoneVector(int size, T def, Zone* zone)
+ : std::vector<T, zone_allocator<T> >(size, def, zone_allocator<T>(zone)) {
+ }
+};
+
+// A wrapper subclass std::deque to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneDeque : public std::deque<T, zone_allocator<T> > {
+ public:
+ explicit ZoneDeque(Zone* zone)
+ : std::deque<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
+};
+
+// A wrapper subclass for std::queue to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneQueue : public std::queue<T, std::deque<T, zone_allocator<T> > > {
+ public:
+ // Constructs an empty queue.
+ explicit ZoneQueue(Zone* zone)
+ : std::queue<T, std::deque<T, zone_allocator<T> > >(
+ std::deque<T, zone_allocator<T> >(zone_allocator<T>(zone))) {}
+};
+
+// Typedefs to shorten commonly used vectors.
+typedef ZoneVector<bool> BoolVector;
+typedef ZoneVector<int> IntVector;
} } // namespace v8::internal
#endif // V8_ZONE_CONTAINERS_H_
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index a690b8d8ca..6f552b6524 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -63,9 +63,9 @@ class Zone {
inline void adjust_segment_bytes_allocated(int delta);
- inline unsigned allocation_size() { return allocation_size_; }
+ inline unsigned allocation_size() const { return allocation_size_; }
- inline Isolate* isolate() { return isolate_; }
+ inline Isolate* isolate() const { return isolate_; }
private:
friend class Isolate;
diff --git a/deps/v8/test/base-unittests/DEPS b/deps/v8/test/base-unittests/DEPS
deleted file mode 100644
index 90b080063f..0000000000
--- a/deps/v8/test/base-unittests/DEPS
+++ /dev/null
@@ -1,8 +0,0 @@
-include_rules = [
- "-include",
- "+include/v8config.h",
- "+include/v8stdint.h",
- "-src",
- "+src/base",
- "+testing/gtest",
-]
diff --git a/deps/v8/test/base-unittests/testcfg.py b/deps/v8/test/base-unittests/testcfg.py
deleted file mode 100644
index 0ed46dcdb1..0000000000
--- a/deps/v8/test/base-unittests/testcfg.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import shutil
-
-from testrunner.local import commands
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.objects import testcase
-
-
-class BaseUnitTestsSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(BaseUnitTestsSuite, self).__init__(name, root)
-
- def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
- if utils.IsWindows():
- shell += ".exe"
- output = commands.Execute(context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags)
- if output.exit_code != 0:
- print output.stdout
- print output.stderr
- return []
- tests = []
- test_case = ''
- for test_desc in output.stdout.strip().split():
- if test_desc.endswith('.'):
- test_case = test_desc
- else:
- test = testcase.TestCase(self, test_case + test_desc, dependency=None)
- tests.append(test)
- tests.sort()
- return tests
-
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + ["--gtest_filter=" + testcase.path] +
- ["--gtest_random_seed=%s" % context.random_seed] +
- ["--gtest_print_time=0"] +
- context.mode_flags)
-
- def shell(self):
- return "base-unittests"
-
-
-def GetSuite(name, root):
- return BaseUnitTestsSuite(name, root)
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 1afd5eca24..a08fa41c15 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -29,7 +29,7 @@
[ALWAYS, {
# Too slow in Debug mode.
'octane/mandreel': [PASS, ['mode == debug', SKIP]],
- # TODO(mstarzinger,ishell): Timeout with TF in predictable mode.
- 'octane/richards': [PASS, NO_VARIANTS],
+ # TODO(turbofan): Too slow in debug mode for now.
+ 'octane/pdfjs': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
]
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 8c573ba30b..6607bef8cc 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -31,7 +31,6 @@ import shutil
import subprocess
import tarfile
-from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.objects import testcase
@@ -184,8 +183,6 @@ class BenchmarksTestSuite(testsuite.TestSuite):
os.chdir(old_cwd)
def VariantFlags(self, testcase, default_flags):
- if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
- return [[]]
# Both --nocrankshaft and --stressopt are very slow. Add TF but without
# always opt to match the way the benchmarks are run for performance
# testing.
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 6d5f927e73..93565c5a7a 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,2 +1,5 @@
-per-file *-mips.*=plind44@gmail.com
-per-file *-mips.*=gergely@homejinni.com
+per-file *-mips*=paul.lind@imgtec.com
+per-file *-mips*=gergely.kis@imgtec.com
+per-file *-mips*=akos.palfi@imgtec.com
+per-file *-mips*=balazs.kilvady@imgtec.com
+per-file *-mips*=dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 2bb08b0ec9..f03710a3b0 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -34,6 +34,13 @@
#include "test/cctest/profiler-extension.h"
#include "test/cctest/trace-extension.h"
+#if (defined(_WIN32) || defined(_WIN64))
+#include <windows.h> // NOLINT
+#if defined(_MSC_VER)
+#include <crtdbg.h>
+#endif // defined(_MSC_VER)
+#endif // defined(_WIN32) || defined(_WIN64)
+
enum InitializationState {kUnset, kUnintialized, kInitialized};
static InitializationState initialization_state_ = kUnset;
static bool disable_automatic_dispose_ = false;
@@ -138,11 +145,27 @@ static void SuggestTestHarness(int tests) {
int main(int argc, char* argv[]) {
+#if (defined(_WIN32) || defined(_WIN64))
+ UINT new_flags =
+ SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
+ UINT existing_flags = SetErrorMode(new_flags);
+ SetErrorMode(existing_flags | new_flags);
+#if defined(_MSC_VER)
+ _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+ _set_error_mode(_OUT_TO_STDERR);
+#endif // _MSC_VER
+#endif // defined(_WIN32) || defined(_WIN64)
+
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
-
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::Initialize();
CcTestArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
@@ -159,10 +182,6 @@ int main(int argc, char* argv[]) {
for (int i = 1; i < argc; i++) {
char* arg = argv[i];
if (strcmp(arg, "--list") == 0) {
- // TODO(svenpanne) Serializer::enabled() and Serializer::code_address_map_
- // are fundamentally broken, so we can't unconditionally initialize and
- // dispose V8.
- v8::V8::Initialize();
PrintTestList(CcTest::last());
print_run_count = false;
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 42946f5bfe..f993d2659a 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -44,6 +44,7 @@
],
'sources': [ ### gcmole(all) ###
'<(generated_file)',
+ 'compiler/c-signature.h',
'compiler/codegen-tester.cc',
'compiler/codegen-tester.h',
'compiler/function-tester.h',
@@ -52,12 +53,12 @@
'compiler/graph-tester.h',
'compiler/simplified-graph-builder.cc',
'compiler/simplified-graph-builder.h',
+ 'compiler/test-basic-block-profiler.cc',
'compiler/test-branch-combine.cc',
'compiler/test-changes-lowering.cc',
'compiler/test-codegen-deopt.cc',
'compiler/test-gap-resolver.cc',
'compiler/test-graph-reducer.cc',
- 'compiler/test-instruction-selector.cc',
'compiler/test-instruction.cc',
'compiler/test-js-context-specialization.cc',
'compiler/test-js-constant-cache.cc',
@@ -72,18 +73,18 @@
'compiler/test-pipeline.cc',
'compiler/test-representation-change.cc',
'compiler/test-run-deopt.cc',
+ 'compiler/test-run-inlining.cc',
'compiler/test-run-intrinsics.cc',
'compiler/test-run-jsbranches.cc',
'compiler/test-run-jscalls.cc',
'compiler/test-run-jsexceptions.cc',
'compiler/test-run-jsops.cc',
'compiler/test-run-machops.cc',
+ 'compiler/test-run-properties.cc',
'compiler/test-run-variables.cc',
'compiler/test-schedule.cc',
'compiler/test-scheduler.cc',
'compiler/test-simplified-lowering.cc',
- 'compiler/test-structured-ifbuilder-fuzzer.cc',
- 'compiler/test-structured-machine-assembler.cc',
'cctest.cc',
'gay-fixed.cc',
'gay-precision.cc',
@@ -125,9 +126,6 @@
'test-heap.cc',
'test-heap-profiler.cc',
'test-hydrogen-types.cc',
- 'test-libplatform-default-platform.cc',
- 'test-libplatform-task-queue.cc',
- 'test-libplatform-worker-thread.cc',
'test-list.cc',
'test-liveedit.cc',
'test-lockers.cc',
@@ -145,7 +143,6 @@
'test-regexp.cc',
'test-reloc-info.cc',
'test-representation.cc',
- 'test-semaphore.cc',
'test-serialize.cc',
'test-spaces.cc',
'test-strings.cc',
@@ -167,7 +164,6 @@
'conditions': [
['v8_target_arch=="ia32"', {
'sources': [ ### gcmole(arch:ia32) ###
- 'compiler/test-instruction-selector-ia32.cc',
'test-assembler-ia32.cc',
'test-code-stubs.cc',
'test-code-stubs-ia32.cc',
@@ -188,7 +184,6 @@
}],
['v8_target_arch=="arm"', {
'sources': [ ### gcmole(arch:arm) ###
- 'compiler/test-instruction-selector-arm.cc',
'test-assembler-arm.cc',
'test-code-stubs.cc',
'test-code-stubs-arm.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 2ab973c52d..6d27074a6f 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -53,6 +53,13 @@
static void Test##Name()
#endif
+#ifndef UNINITIALIZED_DEPENDENT_TEST
+#define UNINITIALIZED_DEPENDENT_TEST(Name, Dep) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, #Dep, true, false); \
+ static void Test##Name()
+#endif
+
#ifndef DISABLED_TEST
#define DISABLED_TEST(Name) \
static void Test##Name(); \
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 60baaca081..5198af6ff5 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -35,10 +35,6 @@
# BUG(382): Weird test. Can't guarantee that it never times out.
'test-api/ApplyInterruption': [PASS, TIMEOUT],
- # TODO(mstarzinger): Fail gracefully on multiple V8::Dispose calls.
- 'test-api/InitializeAndDisposeOnce': [SKIP],
- 'test-api/InitializeAndDisposeMultiple': [SKIP],
-
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
'test-serialize/TestThatAlwaysFails': [FAIL],
@@ -75,40 +71,28 @@
# BUG(2999). (test/cpu-profiler/CollectCpuProfile)
# BUG(3287). (test-cpu-profiler/SampleWhenFrameIsNotSetup)
'test-cpu-profiler/*': [PASS, FLAKY],
-
- ##############################################################################
- # TurboFan compiler failures.
-
- # TODO(mstarzinger): These need investigation and are not categorized yet.
'test-cpu-profiler/*': [SKIP],
- 'test-heap/NextCodeLinkIsWeak': [PASS, NO_VARIANTS],
- # TODO(mstarzinger/verwaest): This access check API is borked.
- 'test-api/TurnOnAccessCheck': [PASS, NO_VARIANTS],
- 'test-api/TurnOnAccessCheckAndRecompile': [PASS, NO_VARIANTS],
+ # BUG(3525). Test crashes flakily.
+ 'test-debug/RecursiveBreakpoints': [PASS, FLAKY],
+ 'test-debug/RecursiveBreakpointsGlobal': [PASS, FLAKY],
- # TODO(mstarzinger): Sometimes the try-catch blacklist fails.
- 'test-debug/DebugEvaluateWithoutStack': [PASS, NO_VARIANTS],
- 'test-debug/MessageQueues': [PASS, NO_VARIANTS],
- 'test-debug/NestedBreakEventContextData': [PASS, NO_VARIANTS],
- 'test-debug/SendClientDataToHandler': [PASS, NO_VARIANTS],
+ ##############################################################################
+ # TurboFan compiler failures.
- # TODO(dcarney): C calls are broken all over the place.
- 'test-run-machops/RunCall*': [SKIP],
- 'test-run-machops/RunLoadImmIndex': [SKIP],
- 'test-run-machops/RunSpillLotsOfThingsWithCall': [SKIP],
+ # TODO(sigurds): The schedule is borked with multiple inlinees,
+ # and cannot handle free-floating loops yet
+ 'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
+ 'test-run-inlining/InlineTwiceDependentDiamondDifferent': [SKIP],
+ 'test-run-inlining/InlineLoop': [SKIP],
# Some tests are just too slow to run for now.
'test-api/Threading*': [PASS, NO_VARIANTS],
- 'test-api/RequestInterruptTestWithMathAbs': [PASS, NO_VARIANTS],
'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
'test-debug/DebugBreakLoop': [PASS, NO_VARIANTS],
- # Support for lazy deoptimization is missing.
- 'test-deoptimization/DeoptimizeCompare': [PASS, NO_VARIANTS],
-
# Support for breakpoints requires using LoadICs and StoreICs.
'test-debug/BreakPointICStore': [PASS, NO_VARIANTS],
'test-debug/BreakPointICLoad': [PASS, NO_VARIANTS],
@@ -190,7 +174,7 @@
'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
# BUG(v8:3155).
- 'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
+ 'test-strings/OneByteArrayJoin': [PASS, ['mode == debug', FAIL]],
# BUG(v8:3247).
'test-mark-compact/NoPromotion': [SKIP],
@@ -199,7 +183,7 @@
'test-mark-compact/Promotion': [PASS, FAIL],
# BUG(v8:3434).
- ' test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP]
+ ' test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
}], # 'arch == arm64'
['arch == arm64 and simulator_run == True', {
@@ -306,6 +290,11 @@
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+
+ # Test requires turbofan:
+ 'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
+ 'codegen-tester/CompareWrapper': [SKIP],
+ 'codegen-tester/ParametersEqual': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
@@ -322,23 +311,20 @@
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+
+ # Test requires turbofan:
+ 'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
+ 'codegen-tester/CompareWrapper': [SKIP],
+ 'codegen-tester/ParametersEqual': [SKIP],
}], # 'arch == mips64el'
##############################################################################
['arch == x87', {
- # TODO (weiliang): Enable below tests after fixing the double register
- # allocation limit in X87 port.
- 'test-serialize/Serialize': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/Deserialize': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/SerializeTwice': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/ContextSerialization': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/ContextDeserialization': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/PartialDeserialization': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/PartialSerialization': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/DeserializeAndRunScript2': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [PASS, ['mode == debug', SKIP]],
- 'test-serialize/DeserializeFromSecondSerialization': [PASS, ['mode == debug', SKIP]],
+ # Test requires turbofan:
+ 'codegen-tester/CompareWrapper': [SKIP],
+ 'codegen-tester/ParametersEqual': [SKIP],
+ 'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
}], # 'arch == x87'
##############################################################################
@@ -435,10 +421,10 @@
'test-constantpool/ConstantPool' : [SKIP],
'test-compiler/GetScriptLineNumber' : [SKIP],
'test-api/ScriptMakingExternalString' : [SKIP],
- 'test-api/ScriptMakingExternalAsciiString' : [SKIP],
+ 'test-api/ScriptMakingExternalOneByteString' : [SKIP],
'test-api/MakingExternalStringConditions' : [SKIP],
- 'test-api/MakingExternalAsciiStringConditions' : [SKIP],
- 'test-api/MakingExternalUnalignedAsciiString' : [SKIP],
+ 'test-api/MakingExternalOneByteStringConditions' : [SKIP],
+ 'test-api/MakingExternalUnalignedOneByteString' : [SKIP],
'test-api/IndexedInterceptorUnboxedDoubleWithIndexedAccessor' : [SKIP],
'test-api/IndependentWeakHandle' : [SKIP],
'test-api/GCFromWeakCallbacks' : [SKIP],
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
new file mode 100644
index 0000000000..5d161dbe7a
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_C_SIGNATURE_H_
+#define V8_COMPILER_C_SIGNATURE_H_
+
+#include "src/compiler/machine-type.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <typename T>
+inline MachineType MachineTypeForC() {
+ CHECK(false); // Instantiated with invalid type.
+ return kMachNone;
+}
+
+template <>
+inline MachineType MachineTypeForC<void>() {
+ return kMachNone;
+}
+
+template <>
+inline MachineType MachineTypeForC<int8_t>() {
+ return kMachInt8;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint8_t>() {
+ return kMachUint8;
+}
+
+template <>
+inline MachineType MachineTypeForC<int16_t>() {
+ return kMachInt16;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint16_t>() {
+ return kMachUint16;
+}
+
+template <>
+inline MachineType MachineTypeForC<int32_t>() {
+ return kMachInt32;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint32_t>() {
+ return kMachUint32;
+}
+
+template <>
+inline MachineType MachineTypeForC<int64_t>() {
+ return kMachInt64;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint64_t>() {
+ return kMachUint64;
+}
+
+template <>
+inline MachineType MachineTypeForC<double>() {
+ return kMachFloat64;
+}
+
+template <>
+inline MachineType MachineTypeForC<Object*>() {
+ return kMachAnyTagged;
+}
+
+template <typename Ret, uint16_t kParamCount>
+class CSignatureOf : public MachineSignature {
+ protected:
+ MachineType storage_[1 + kParamCount];
+
+ CSignatureOf()
+ : MachineSignature(MachineTypeForC<Ret>() != kMachNone ? 1 : 0,
+ kParamCount,
+ reinterpret_cast<MachineType*>(&storage_)) {
+ if (return_count_ == 1) storage_[0] = MachineTypeForC<Ret>();
+ }
+ void Set(int index, MachineType type) {
+ DCHECK(index >= 0 && index < kParamCount);
+ reps_[return_count_ + index] = type;
+ }
+};
+
+// Helper classes for instantiating Signature objects to be callable from C.
+template <typename Ret>
+class CSignature0 : public CSignatureOf<Ret, 0> {
+ public:
+ CSignature0() : CSignatureOf<Ret, 0>() {}
+};
+
+template <typename Ret, typename P1>
+class CSignature1 : public CSignatureOf<Ret, 1> {
+ public:
+ CSignature1() : CSignatureOf<Ret, 1>() {
+ this->Set(0, MachineTypeForC<P1>());
+ }
+};
+
+template <typename Ret, typename P1, typename P2>
+class CSignature2 : public CSignatureOf<Ret, 2> {
+ public:
+ CSignature2() : CSignatureOf<Ret, 2>() {
+ this->Set(0, MachineTypeForC<P1>());
+ this->Set(1, MachineTypeForC<P2>());
+ }
+};
+
+template <typename Ret, typename P1, typename P2, typename P3>
+class CSignature3 : public CSignatureOf<Ret, 3> {
+ public:
+ CSignature3() : CSignatureOf<Ret, 3>() {
+ this->Set(0, MachineTypeForC<P1>());
+ this->Set(1, MachineTypeForC<P2>());
+ this->Set(2, MachineTypeForC<P3>());
+ }
+};
+
+static const CSignature2<int32_t, int32_t, int32_t> int32_int32_to_int32;
+static const CSignature2<uint32_t, uint32_t, uint32_t> uint32_uint32_to_uint32;
+static const CSignature2<double, double, double> float64_float64_to_float64;
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_C_SIGNATURE_H_
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 40189ab405..e86416028b 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -23,6 +23,7 @@ namespace v8 {
namespace internal {
namespace compiler {
+// TODO(titzer): use c-signature.h instead of ReturnValueTraits
template <typename R>
struct ReturnValueTraits {
static R Cast(uintptr_t r) { return reinterpret_cast<R>(r); }
@@ -32,72 +33,74 @@ struct ReturnValueTraits {
while (false) {
*(static_cast<Object* volatile*>(0)) = static_cast<R>(0);
}
- return kMachineTagged;
+ return kMachAnyTagged;
}
};
template <>
struct ReturnValueTraits<int32_t*> {
static int32_t* Cast(uintptr_t r) { return reinterpret_cast<int32_t*>(r); }
- static MachineType Representation() {
- return MachineOperatorBuilder::pointer_rep();
- }
+ static MachineType Representation() { return kMachPtr; }
};
template <>
struct ReturnValueTraits<void> {
static void Cast(uintptr_t r) {}
- static MachineType Representation() {
- return MachineOperatorBuilder::pointer_rep();
- }
+ static MachineType Representation() { return kMachPtr; }
};
template <>
struct ReturnValueTraits<bool> {
static bool Cast(uintptr_t r) { return static_cast<bool>(r); }
- static MachineType Representation() {
- return MachineOperatorBuilder::pointer_rep();
- }
+ static MachineType Representation() { return kRepBit; }
};
template <>
struct ReturnValueTraits<int32_t> {
static int32_t Cast(uintptr_t r) { return static_cast<int32_t>(r); }
- static MachineType Representation() { return kMachineWord32; }
+ static MachineType Representation() { return kMachInt32; }
};
template <>
struct ReturnValueTraits<uint32_t> {
static uint32_t Cast(uintptr_t r) { return static_cast<uint32_t>(r); }
- static MachineType Representation() { return kMachineWord32; }
+ static MachineType Representation() { return kMachUint32; }
};
template <>
struct ReturnValueTraits<int64_t> {
static int64_t Cast(uintptr_t r) { return static_cast<int64_t>(r); }
- static MachineType Representation() { return kMachineWord64; }
+ static MachineType Representation() { return kMachInt64; }
};
template <>
struct ReturnValueTraits<uint64_t> {
static uint64_t Cast(uintptr_t r) { return static_cast<uint64_t>(r); }
- static MachineType Representation() { return kMachineWord64; }
+ static MachineType Representation() { return kMachUint64; }
};
template <>
struct ReturnValueTraits<int16_t> {
static int16_t Cast(uintptr_t r) { return static_cast<int16_t>(r); }
- static MachineType Representation() {
- return MachineOperatorBuilder::pointer_rep();
- }
+ static MachineType Representation() { return kMachInt16; }
+};
+
+template <>
+struct ReturnValueTraits<uint16_t> {
+ static uint16_t Cast(uintptr_t r) { return static_cast<uint16_t>(r); }
+ static MachineType Representation() { return kMachUint16; }
};
template <>
struct ReturnValueTraits<int8_t> {
static int8_t Cast(uintptr_t r) { return static_cast<int8_t>(r); }
- static MachineType Representation() {
- return MachineOperatorBuilder::pointer_rep();
- }
+ static MachineType Representation() { return kMachInt8; }
+};
+
+template <>
+struct ReturnValueTraits<uint8_t> {
+ static uint8_t Cast(uintptr_t r) { return static_cast<uint8_t>(r); }
+ static MachineType Representation() { return kMachUint8; }
};
template <>
@@ -106,7 +109,7 @@ struct ReturnValueTraits<double> {
UNREACHABLE();
return 0.0;
}
- static MachineType Representation() { return kMachineFloat64; }
+ static MachineType Representation() { return kMachFloat64; }
};
@@ -127,34 +130,40 @@ struct ParameterTraits<T*> {
class CallHelper {
public:
- explicit CallHelper(Isolate* isolate) : isolate_(isolate) { USE(isolate_); }
+ explicit CallHelper(Isolate* isolate, MachineSignature* machine_sig)
+ : machine_sig_(machine_sig), isolate_(isolate) {
+ USE(isolate_);
+ }
virtual ~CallHelper() {}
- static MachineCallDescriptorBuilder* ToCallDescriptorBuilder(
- Zone* zone, MachineType return_type, MachineType p0 = kMachineLast,
- MachineType p1 = kMachineLast, MachineType p2 = kMachineLast,
- MachineType p3 = kMachineLast, MachineType p4 = kMachineLast) {
- const int kSize = 5;
- MachineType* params = zone->NewArray<MachineType>(kSize);
- params[0] = p0;
- params[1] = p1;
- params[2] = p2;
- params[3] = p3;
- params[4] = p4;
- int parameter_count = 0;
- for (int i = 0; i < kSize; ++i) {
- if (params[i] == kMachineLast) {
- break;
- }
- parameter_count++;
+ static MachineSignature* MakeMachineSignature(
+ Zone* zone, MachineType return_type, MachineType p0 = kMachNone,
+ MachineType p1 = kMachNone, MachineType p2 = kMachNone,
+ MachineType p3 = kMachNone, MachineType p4 = kMachNone) {
+ // Count the number of parameters.
+ size_t param_count = 5;
+ MachineType types[] = {p0, p1, p2, p3, p4};
+ while (param_count > 0 && types[param_count - 1] == kMachNone)
+ param_count--;
+ size_t return_count = return_type == kMachNone ? 0 : 1;
+
+ // Build the machine signature.
+ MachineSignature::Builder builder(zone, return_count, param_count);
+ if (return_count > 0) builder.AddReturn(return_type);
+ for (size_t i = 0; i < param_count; i++) {
+ builder.AddParam(types[i]);
}
- return new (zone)
- MachineCallDescriptorBuilder(return_type, parameter_count, params);
+ return builder.Build();
}
protected:
- virtual void VerifyParameters(int parameter_count,
- MachineType* parameters) = 0;
+ MachineSignature* machine_sig_;
+ void VerifyParameters(size_t parameter_count, MachineType* parameter_types) {
+ CHECK(machine_sig_->parameter_count() == parameter_count);
+ for (size_t i = 0; i < parameter_count; i++) {
+ CHECK_EQ(machine_sig_->GetParam(i), parameter_types[i]);
+ }
+ }
virtual byte* Generate() = 0;
private:
@@ -277,14 +286,14 @@ class CallHelper {
template <typename P1>
void VerifyParameters1() {
MachineType parameters[] = {ReturnValueTraits<P1>::Representation()};
- VerifyParameters(ARRAY_SIZE(parameters), parameters);
+ VerifyParameters(arraysize(parameters), parameters);
}
template <typename P1, typename P2>
void VerifyParameters2() {
MachineType parameters[] = {ReturnValueTraits<P1>::Representation(),
ReturnValueTraits<P2>::Representation()};
- VerifyParameters(ARRAY_SIZE(parameters), parameters);
+ VerifyParameters(arraysize(parameters), parameters);
}
template <typename P1, typename P2, typename P3>
@@ -292,7 +301,7 @@ class CallHelper {
MachineType parameters[] = {ReturnValueTraits<P1>::Representation(),
ReturnValueTraits<P2>::Representation(),
ReturnValueTraits<P3>::Representation()};
- VerifyParameters(ARRAY_SIZE(parameters), parameters);
+ VerifyParameters(arraysize(parameters), parameters);
}
template <typename P1, typename P2, typename P3, typename P4>
@@ -301,7 +310,7 @@ class CallHelper {
ReturnValueTraits<P2>::Representation(),
ReturnValueTraits<P3>::Representation(),
ReturnValueTraits<P4>::Representation()};
- VerifyParameters(ARRAY_SIZE(parameters), parameters);
+ VerifyParameters(arraysize(parameters), parameters);
}
#endif
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index 24b2c6e9f0..be445de3ce 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/compiler/generic-node-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -293,7 +294,7 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
for (int i = -2; i < num_int_inputs; i++) { // for all left shapes
for (int j = -2; j < num_int_inputs; j++) { // for all right shapes
if (i >= 0 && j >= 0) break; // No constant/constant combos
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
Node* n0;
@@ -303,7 +304,7 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
if (i == -2) {
n0 = p0;
} else if (i == -1) {
- n0 = m.LoadFromPointer(&input_a, kMachineWord32);
+ n0 = m.LoadFromPointer(&input_a, kMachInt32);
} else {
n0 = m.Int32Constant(inputs[i]);
}
@@ -312,7 +313,7 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
if (j == -2) {
n1 = p1;
} else if (j == -1) {
- n1 = m.LoadFromPointer(&input_b, kMachineWord32);
+ n1 = m.LoadFromPointer(&input_b, kMachInt32);
} else {
n1 = m.Int32Constant(inputs[j]);
}
@@ -369,8 +370,10 @@ void Int32BinopInputShapeTester::RunRight(
}
+#if V8_TURBOFAN_TARGET
+
TEST(ParametersEqual) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p1 = m.Parameter(1);
CHECK_NE(NULL, p1);
Node* p0 = m.Parameter(0);
@@ -380,8 +383,6 @@ TEST(ParametersEqual) {
}
-#if V8_TURBOFAN_TARGET
-
void RunSmiConstant(int32_t v) {
// TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
#if !V8_TARGET_ARCH_X64
@@ -486,7 +487,7 @@ TEST(RunHeapNumberConstant) {
TEST(RunParam1) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.Return(m.Parameter(0));
FOR_INT32_INPUTS(i) {
@@ -497,7 +498,7 @@ TEST(RunParam1) {
TEST(RunParam2_1) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
m.Return(p0);
@@ -511,7 +512,7 @@ TEST(RunParam2_1) {
TEST(RunParam2_2) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
m.Return(p1);
@@ -526,8 +527,7 @@ TEST(RunParam2_2) {
TEST(RunParam3) {
for (int i = 0; i < 3; i++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
Node* nodes[] = {m.Parameter(0), m.Parameter(1), m.Parameter(2)};
m.Return(nodes[i]);
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 300381b493..6aa5bae560 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -9,7 +9,6 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/structured-machine-assembler.h"
#include "src/simulator.h"
#include "test/cctest/compiler/call-tester.h"
@@ -26,11 +25,13 @@ class MachineAssemblerTester : public HandleAndZoneScope,
MachineType p1, MachineType p2, MachineType p3,
MachineType p4)
: HandleAndZoneScope(),
- CallHelper(main_isolate()),
- MachineAssembler(new (main_zone()) Graph(main_zone()),
- ToCallDescriptorBuilder(main_zone(), return_type, p0,
- p1, p2, p3, p4),
- MachineOperatorBuilder::pointer_rep()) {}
+ CallHelper(
+ main_isolate(),
+ MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4)),
+ MachineAssembler(
+ new (main_zone()) Graph(main_zone()),
+ MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4),
+ kMachPtr) {}
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return this->Load(rep, this->PointerConstant(address),
@@ -59,15 +60,6 @@ class MachineAssemblerTester : public HandleAndZoneScope,
void GenerateCode() { Generate(); }
protected:
- virtual void VerifyParameters(int parameter_count,
- MachineType* parameter_types) {
- CHECK_EQ(this->parameter_count(), parameter_count);
- const MachineType* expected_types = this->parameter_types();
- for (int i = 0; i < parameter_count; i++) {
- CHECK_EQ(expected_types[i], parameter_types[i]);
- }
- }
-
virtual byte* Generate() {
if (code_.is_null()) {
Schedule* schedule = this->Export();
@@ -91,11 +83,11 @@ class RawMachineAssemblerTester
: public MachineAssemblerTester<RawMachineAssembler>,
public CallHelper2<ReturnType, RawMachineAssemblerTester<ReturnType> > {
public:
- RawMachineAssemblerTester(MachineType p0 = kMachineLast,
- MachineType p1 = kMachineLast,
- MachineType p2 = kMachineLast,
- MachineType p3 = kMachineLast,
- MachineType p4 = kMachineLast)
+ RawMachineAssemblerTester(MachineType p0 = kMachNone,
+ MachineType p1 = kMachNone,
+ MachineType p2 = kMachNone,
+ MachineType p3 = kMachNone,
+ MachineType p4 = kMachNone)
: MachineAssemblerTester<RawMachineAssembler>(
ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3,
p4) {}
@@ -121,23 +113,6 @@ class RawMachineAssemblerTester
};
-template <typename ReturnType>
-class StructuredMachineAssemblerTester
- : public MachineAssemblerTester<StructuredMachineAssembler>,
- public CallHelper2<ReturnType,
- StructuredMachineAssemblerTester<ReturnType> > {
- public:
- StructuredMachineAssemblerTester(MachineType p0 = kMachineLast,
- MachineType p1 = kMachineLast,
- MachineType p2 = kMachineLast,
- MachineType p3 = kMachineLast,
- MachineType p4 = kMachineLast)
- : MachineAssemblerTester<StructuredMachineAssembler>(
- ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3,
- p4) {}
-};
-
-
static const bool USE_RESULT_BUFFER = true;
static const bool USE_RETURN_REGISTER = false;
static const int32_t CHECK_VALUE = 0x99BEEDCE;
@@ -201,15 +176,25 @@ class BinopTester {
// A helper class for testing code sequences that take two int parameters and
// return an int value.
class Int32BinopTester
- : public BinopTester<int32_t, kMachineWord32, USE_RETURN_REGISTER> {
+ : public BinopTester<int32_t, kMachInt32, USE_RETURN_REGISTER> {
public:
explicit Int32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<int32_t, kMachineWord32, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<int32_t, kMachInt32, USE_RETURN_REGISTER>(tester) {}
+};
+
- int32_t call(uint32_t a0, uint32_t a1) {
- p0 = static_cast<int32_t>(a0);
- p1 = static_cast<int32_t>(a1);
- return T->Call();
+// A helper class for testing code sequences that take two uint parameters and
+// return an uint value.
+class Uint32BinopTester
+ : public BinopTester<uint32_t, kMachUint32, USE_RETURN_REGISTER> {
+ public:
+ explicit Uint32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+ : BinopTester<uint32_t, kMachUint32, USE_RETURN_REGISTER>(tester) {}
+
+ uint32_t call(uint32_t a0, uint32_t a1) {
+ p0 = a0;
+ p1 = a1;
+ return static_cast<uint32_t>(T->Call());
}
};
@@ -218,10 +203,10 @@ class Int32BinopTester
// return a double value.
// TODO(titzer): figure out how to return doubles correctly on ia32.
class Float64BinopTester
- : public BinopTester<double, kMachineFloat64, USE_RESULT_BUFFER> {
+ : public BinopTester<double, kMachFloat64, USE_RESULT_BUFFER> {
public:
explicit Float64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<double, kMachineFloat64, USE_RESULT_BUFFER>(tester) {}
+ : BinopTester<double, kMachFloat64, USE_RESULT_BUFFER>(tester) {}
};
@@ -230,10 +215,10 @@ class Float64BinopTester
// TODO(titzer): pick word size of pointers based on V8_TARGET.
template <typename Type>
class PointerBinopTester
- : public BinopTester<Type*, kMachineWord32, USE_RETURN_REGISTER> {
+ : public BinopTester<Type*, kMachPtr, USE_RETURN_REGISTER> {
public:
explicit PointerBinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<Type*, kMachineWord32, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<Type*, kMachPtr, USE_RETURN_REGISTER>(tester) {}
};
@@ -241,10 +226,10 @@ class PointerBinopTester
// return a tagged value.
template <typename Type>
class TaggedBinopTester
- : public BinopTester<Type*, kMachineTagged, USE_RETURN_REGISTER> {
+ : public BinopTester<Type*, kMachAnyTagged, USE_RETURN_REGISTER> {
public:
explicit TaggedBinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<Type*, kMachineTagged, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<Type*, kMachAnyTagged, USE_RETURN_REGISTER>(tester) {}
};
// A helper class for testing compares. Wraps a machine opcode and provides
@@ -257,7 +242,7 @@ class CompareWrapper {
return m->NewNode(op(m->machine()), a, b);
}
- Operator* op(MachineOperatorBuilder* machine) {
+ const Operator* op(MachineOperatorBuilder* machine) {
switch (opcode) {
case IrOpcode::kWord32Equal:
return machine->Word32Equal();
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 2ed2fe9988..c869f00d19 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -26,10 +26,15 @@ namespace compiler {
class FunctionTester : public InitializedHandleScope {
public:
- explicit FunctionTester(const char* source)
+ explicit FunctionTester(const char* source, uint32_t flags = 0)
: isolate(main_isolate()),
- function((FLAG_allow_natives_syntax = true, NewFunction(source))) {
+ function((FLAG_allow_natives_syntax = true, NewFunction(source))),
+ flags_(flags) {
Compile(function);
+ const uint32_t supported_flags = CompilationInfo::kContextSpecializing |
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kTypingEnabled;
+ CHECK_EQ(0, flags_ & ~supported_flags);
}
Isolate* isolate;
@@ -40,17 +45,25 @@ class FunctionTester : public InitializedHandleScope {
CompilationInfoWithZone info(function);
CHECK(Parser::Parse(&info));
- StrictMode strict_mode = info.function()->strict_mode();
- info.SetStrictMode(strict_mode);
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+ if (flags_ & CompilationInfo::kContextSpecializing) {
+ info.MarkAsContextSpecializing();
+ }
+ if (flags_ & CompilationInfo::kInliningEnabled) {
+ info.MarkAsInliningEnabled();
+ }
+ if (flags_ & CompilationInfo::kTypingEnabled) {
+ info.MarkAsTypingEnabled();
+ }
CHECK(Rewriter::Rewrite(&info));
CHECK(Scope::Analyze(&info));
- CHECK_NE(NULL, info.scope());
-
- EnsureDeoptimizationSupport(&info);
+ CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Pipeline pipeline(&info);
Handle<Code> code = pipeline.GenerateCode();
+ if (FLAG_turbo_deoptimization) {
+ info.context()->native_context()->AddOptimizedCode(*code);
+ }
CHECK(!code.is_null());
function->ReplaceCode(*code);
@@ -70,23 +83,6 @@ class FunctionTester : public InitializedHandleScope {
return function;
}
- static void EnsureDeoptimizationSupport(CompilationInfo* info) {
- bool should_recompile = !info->shared_info()->has_deoptimization_support();
- if (should_recompile) {
- CompilationInfoWithZone unoptimized(info->shared_info());
- // Note that we use the same AST that we will use for generating the
- // optimized code.
- unoptimized.SetFunction(info->function());
- unoptimized.PrepareForCompilation(info->scope());
- unoptimized.SetContext(info->context());
- if (should_recompile) unoptimized.EnableDeoptimizationSupport();
- bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
- CHECK(succeeded);
- Handle<SharedFunctionInfo> shared = info->shared_info();
- shared->EnableDeoptimizationSupport(*unoptimized.code());
- }
- }
-
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
Handle<Object> args[] = {a, b};
return Execution::Call(isolate, function, undefined(), 2, args, false);
@@ -186,6 +182,9 @@ class FunctionTester : public InitializedHandleScope {
Handle<Object> true_value() { return isolate->factory()->true_value(); }
Handle<Object> false_value() { return isolate->factory()->false_value(); }
+
+ private:
+ uint32_t flags_;
};
}
}
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.cc b/deps/v8/test/cctest/compiler/graph-builder-tester.cc
index fb6e4a28ce..bfa8226458 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.cc
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.cc
@@ -3,16 +3,16 @@
// found in the LICENSE file.
#include "test/cctest/compiler/graph-builder-tester.h"
+
+#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
namespace v8 {
namespace internal {
namespace compiler {
-MachineCallHelper::MachineCallHelper(Zone* zone,
- MachineCallDescriptorBuilder* builder)
- : CallHelper(zone->isolate()),
- call_descriptor_builder_(builder),
+MachineCallHelper::MachineCallHelper(Zone* zone, MachineSignature* machine_sig)
+ : CallHelper(zone->isolate(), machine_sig),
parameters_(NULL),
graph_(NULL) {}
@@ -21,9 +21,10 @@ void MachineCallHelper::InitParameters(GraphBuilder* builder,
CommonOperatorBuilder* common) {
DCHECK_EQ(NULL, parameters_);
graph_ = builder->graph();
- if (parameter_count() == 0) return;
- parameters_ = graph_->zone()->NewArray<Node*>(parameter_count());
- for (int i = 0; i < parameter_count(); ++i) {
+ int param_count = static_cast<int>(parameter_count());
+ if (param_count == 0) return;
+ parameters_ = graph_->zone()->NewArray<Node*>(param_count);
+ for (int i = 0; i < param_count; ++i) {
parameters_[i] = builder->NewNode(common->Parameter(i), graph_->start());
}
}
@@ -35,7 +36,8 @@ byte* MachineCallHelper::Generate() {
if (code_.is_null()) {
Zone* zone = graph_->zone();
CompilationInfo info(zone->isolate(), zone);
- Linkage linkage(&info, call_descriptor_builder_->BuildCallDescriptor(zone));
+ Linkage linkage(&info,
+ Linkage::GetSimplifiedCDescriptor(zone, machine_sig_));
Pipeline pipeline(&info);
code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph_);
}
@@ -43,21 +45,10 @@ byte* MachineCallHelper::Generate() {
}
-void MachineCallHelper::VerifyParameters(int parameter_count,
- MachineType* parameter_types) {
- CHECK_EQ(this->parameter_count(), parameter_count);
- const MachineType* expected_types =
- call_descriptor_builder_->parameter_types();
- for (int i = 0; i < parameter_count; i++) {
- CHECK_EQ(expected_types[i], parameter_types[i]);
- }
-}
-
-
-Node* MachineCallHelper::Parameter(int offset) {
+Node* MachineCallHelper::Parameter(size_t index) {
DCHECK_NE(NULL, parameters_);
- DCHECK(0 <= offset && offset < parameter_count());
- return parameters_[offset];
+ DCHECK(index < parameter_count());
+ return parameters_[index];
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 64d9b8a73d..df79250823 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -10,9 +10,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-builder.h"
-#include "src/compiler/machine-node-factory.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/simplified-node-factory.h"
#include "src/compiler/simplified-operator.h"
#include "test/cctest/compiler/call-tester.h"
#include "test/cctest/compiler/simplified-graph-builder.h"
@@ -28,8 +26,8 @@ class DirectGraphBuilder : public GraphBuilder {
virtual ~DirectGraphBuilder() {}
protected:
- virtual Node* MakeNode(Operator* op, int value_input_count,
- Node** value_inputs) {
+ virtual Node* MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs) FINAL {
return graph()->NewNode(op, value_input_count, value_inputs);
}
};
@@ -37,24 +35,20 @@ class DirectGraphBuilder : public GraphBuilder {
class MachineCallHelper : public CallHelper {
public:
- MachineCallHelper(Zone* zone, MachineCallDescriptorBuilder* builder);
+ MachineCallHelper(Zone* zone, MachineSignature* machine_sig);
- Node* Parameter(int offset);
+ Node* Parameter(size_t index);
void GenerateCode() { Generate(); }
protected:
virtual byte* Generate();
- virtual void VerifyParameters(int parameter_count, MachineType* parameters);
void InitParameters(GraphBuilder* builder, CommonOperatorBuilder* common);
protected:
- int parameter_count() const {
- return call_descriptor_builder_->parameter_count();
- }
+ size_t parameter_count() const { return machine_sig_->parameter_count(); }
private:
- MachineCallDescriptorBuilder* call_descriptor_builder_;
Node** parameters_;
// TODO(dcarney): shouldn't need graph stored.
Graph* graph_;
@@ -67,7 +61,6 @@ class GraphAndBuilders {
explicit GraphAndBuilders(Zone* zone)
: main_graph_(new (zone) Graph(zone)),
main_common_(zone),
- main_machine_(zone),
main_simplified_(zone) {}
protected:
@@ -87,20 +80,20 @@ class GraphBuilderTester
public SimplifiedGraphBuilder,
public CallHelper2<ReturnType, GraphBuilderTester<ReturnType> > {
public:
- explicit GraphBuilderTester(MachineType p0 = kMachineLast,
- MachineType p1 = kMachineLast,
- MachineType p2 = kMachineLast,
- MachineType p3 = kMachineLast,
- MachineType p4 = kMachineLast)
+ explicit GraphBuilderTester(MachineType p0 = kMachNone,
+ MachineType p1 = kMachNone,
+ MachineType p2 = kMachNone,
+ MachineType p3 = kMachNone,
+ MachineType p4 = kMachNone)
: GraphAndBuilders(main_zone()),
MachineCallHelper(
main_zone(),
- ToCallDescriptorBuilder(
+ MakeMachineSignature(
main_zone(), ReturnValueTraits<ReturnType>::Representation(),
p0, p1, p2, p3, p4)),
SimplifiedGraphBuilder(main_graph_, &main_common_, &main_machine_,
&main_simplified_) {
- Begin(parameter_count());
+ Begin(static_cast<int>(parameter_count()));
InitParameters(this, &main_common_);
}
virtual ~GraphBuilderTester() {}
diff --git a/deps/v8/test/cctest/compiler/instruction-selector-tester.h b/deps/v8/test/cctest/compiler/instruction-selector-tester.h
index 60adaec823..3a28b2e5df 100644
--- a/deps/v8/test/cctest/compiler/instruction-selector-tester.h
+++ b/deps/v8/test/cctest/compiler/instruction-selector-tester.h
@@ -30,17 +30,17 @@ class InstructionSelectorTester : public HandleAndZoneScope,
static MachineType* BuildParameterArray(Zone* zone) {
MachineType* array = zone->NewArray<MachineType>(kParameterCount);
for (int i = 0; i < kParameterCount; ++i) {
- array[i] = kMachineWord32;
+ array[i] = kMachInt32;
}
return array;
}
InstructionSelectorTester()
: RawMachineAssembler(
- new (main_zone()) Graph(main_zone()), new (main_zone())
- MachineCallDescriptorBuilder(kMachineWord32, kParameterCount,
- BuildParameterArray(main_zone())),
- MachineOperatorBuilder::pointer_rep()) {}
+ new (main_zone()) Graph(main_zone()),
+ new (main_zone()) MachineCallDescriptorBuilder(
+ kMachInt32, kParameterCount, BuildParameterArray(main_zone())),
+ kMachPtr) {}
void SelectInstructions(CpuFeature feature) {
SelectInstructions(InstructionSelector::Features(feature));
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
index c688399eae..c44d5ed52e 100644
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
@@ -4,6 +4,9 @@
#include "test/cctest/compiler/simplified-graph-builder.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -11,7 +14,10 @@ namespace compiler {
SimplifiedGraphBuilder::SimplifiedGraphBuilder(
Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified)
- : StructuredGraphBuilder(graph, common),
+ : GraphBuilder(graph),
+ effect_(NULL),
+ return_(NULL),
+ common_(common),
machine_(machine),
simplified_(simplified) {}
@@ -20,57 +26,63 @@ void SimplifiedGraphBuilder::Begin(int num_parameters) {
DCHECK(graph()->start() == NULL);
Node* start = graph()->NewNode(common()->Start(num_parameters));
graph()->SetStart(start);
- set_environment(new (zone()) Environment(this, start));
+ effect_ = start;
}
void SimplifiedGraphBuilder::Return(Node* value) {
- Node* control = NewNode(common()->Return(), value);
- UpdateControlDependencyToLeaveFunction(control);
+ return_ =
+ graph()->NewNode(common()->Return(), value, effect_, graph()->start());
+ effect_ = NULL;
}
void SimplifiedGraphBuilder::End() {
- environment()->UpdateControlDependency(exit_control());
- graph()->SetEnd(NewNode(common()->End()));
-}
-
-
-SimplifiedGraphBuilder::Environment::Environment(
- SimplifiedGraphBuilder* builder, Node* control_dependency)
- : StructuredGraphBuilder::Environment(builder, control_dependency) {}
-
-
-Node* SimplifiedGraphBuilder::Environment::Top() {
- DCHECK(!values()->empty());
- return values()->back();
-}
-
-
-void SimplifiedGraphBuilder::Environment::Push(Node* node) {
- values()->push_back(node);
-}
-
-
-Node* SimplifiedGraphBuilder::Environment::Pop() {
- DCHECK(!values()->empty());
- Node* back = values()->back();
- values()->pop_back();
- return back;
-}
-
-
-void SimplifiedGraphBuilder::Environment::Poke(size_t depth, Node* node) {
- DCHECK(depth < values()->size());
- size_t index = values()->size() - depth - 1;
- values()->at(index) = node;
+ Node* end = graph()->NewNode(common()->End(), return_);
+ graph()->SetEnd(end);
}
-Node* SimplifiedGraphBuilder::Environment::Peek(size_t depth) {
- DCHECK(depth < values()->size());
- size_t index = values()->size() - depth - 1;
- return values()->at(index);
+Node* SimplifiedGraphBuilder::MakeNode(const Operator* op,
+ int value_input_count,
+ Node** value_inputs) {
+ DCHECK(op->InputCount() == value_input_count);
+
+ DCHECK(!OperatorProperties::HasContextInput(op));
+ DCHECK(!OperatorProperties::HasFrameStateInput(op));
+ bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+ bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+ DCHECK(OperatorProperties::GetControlInputCount(op) < 2);
+ DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
+
+ Node* result = NULL;
+ if (!has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs);
+ } else {
+ int input_count_with_deps = value_input_count;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_effect) {
+ *current_input++ = effect_;
+ }
+ if (has_control) {
+ *current_input++ = graph()->start();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer);
+ if (has_effect) {
+ effect_ = result;
+ }
+ if (OperatorProperties::HasControlOutput(result->op())) {
+ // This graph builder does not support control flow.
+ UNREACHABLE();
+ }
+ }
+
+ return result;
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.h b/deps/v8/test/cctest/compiler/simplified-graph-builder.h
index fa9161e171..1b637b7676 100644
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.h
+++ b/deps/v8/test/cctest/compiler/simplified-graph-builder.h
@@ -7,9 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-builder.h"
-#include "src/compiler/machine-node-factory.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/simplified-node-factory.h"
#include "src/compiler/simplified-operator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/call-tester.h"
@@ -18,40 +16,18 @@ namespace v8 {
namespace internal {
namespace compiler {
-class SimplifiedGraphBuilder
- : public StructuredGraphBuilder,
- public MachineNodeFactory<SimplifiedGraphBuilder>,
- public SimplifiedNodeFactory<SimplifiedGraphBuilder> {
+class SimplifiedGraphBuilder : public GraphBuilder {
public:
SimplifiedGraphBuilder(Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
SimplifiedOperatorBuilder* simplified);
virtual ~SimplifiedGraphBuilder() {}
- class Environment : public StructuredGraphBuilder::Environment {
- public:
- Environment(SimplifiedGraphBuilder* builder, Node* control_dependency);
-
- // TODO(dcarney): encode somehow and merge into StructuredGraphBuilder.
- // SSA renaming operations.
- Node* Top();
- void Push(Node* node);
- Node* Pop();
- void Poke(size_t depth, Node* node);
- Node* Peek(size_t depth);
- };
-
+ Zone* zone() const { return graph()->zone(); }
Isolate* isolate() const { return zone()->isolate(); }
- Zone* zone() const { return StructuredGraphBuilder::zone(); }
- CommonOperatorBuilder* common() const {
- return StructuredGraphBuilder::common();
- }
+ CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
- Environment* environment() {
- return reinterpret_cast<Environment*>(
- StructuredGraphBuilder::environment());
- }
// Initialize graph and builder.
void Begin(int num_parameters);
@@ -61,7 +37,114 @@ class SimplifiedGraphBuilder
// Close the graph.
void End();
+ Node* PointerConstant(void* value) {
+ intptr_t intptr_value = reinterpret_cast<intptr_t>(value);
+ return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value))
+ : Int32Constant(static_cast<int>(intptr_value));
+ }
+ Node* Int32Constant(int32_t value) {
+ return NewNode(common()->Int32Constant(value));
+ }
+ Node* HeapConstant(Handle<Object> object) {
+ Unique<Object> val = Unique<Object>::CreateUninitialized(object);
+ return NewNode(common()->HeapConstant(val));
+ }
+
+ Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); }
+
+ Node* NumberEqual(Node* a, Node* b) {
+ return NewNode(simplified()->NumberEqual(), a, b);
+ }
+ Node* NumberLessThan(Node* a, Node* b) {
+ return NewNode(simplified()->NumberLessThan(), a, b);
+ }
+ Node* NumberLessThanOrEqual(Node* a, Node* b) {
+ return NewNode(simplified()->NumberLessThanOrEqual(), a, b);
+ }
+ Node* NumberAdd(Node* a, Node* b) {
+ return NewNode(simplified()->NumberAdd(), a, b);
+ }
+ Node* NumberSubtract(Node* a, Node* b) {
+ return NewNode(simplified()->NumberSubtract(), a, b);
+ }
+ Node* NumberMultiply(Node* a, Node* b) {
+ return NewNode(simplified()->NumberMultiply(), a, b);
+ }
+ Node* NumberDivide(Node* a, Node* b) {
+ return NewNode(simplified()->NumberDivide(), a, b);
+ }
+ Node* NumberModulus(Node* a, Node* b) {
+ return NewNode(simplified()->NumberModulus(), a, b);
+ }
+ Node* NumberToInt32(Node* a) {
+ return NewNode(simplified()->NumberToInt32(), a);
+ }
+ Node* NumberToUint32(Node* a) {
+ return NewNode(simplified()->NumberToUint32(), a);
+ }
+
+ Node* StringEqual(Node* a, Node* b) {
+ return NewNode(simplified()->StringEqual(), a, b);
+ }
+ Node* StringLessThan(Node* a, Node* b) {
+ return NewNode(simplified()->StringLessThan(), a, b);
+ }
+ Node* StringLessThanOrEqual(Node* a, Node* b) {
+ return NewNode(simplified()->StringLessThanOrEqual(), a, b);
+ }
+ Node* StringAdd(Node* a, Node* b) {
+ return NewNode(simplified()->StringAdd(), a, b);
+ }
+
+ Node* ChangeTaggedToInt32(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToInt32(), a);
+ }
+ Node* ChangeTaggedToUint32(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToUint32(), a);
+ }
+ Node* ChangeTaggedToFloat64(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToFloat64(), a);
+ }
+ Node* ChangeInt32ToTagged(Node* a) {
+ return NewNode(simplified()->ChangeInt32ToTagged(), a);
+ }
+ Node* ChangeUint32ToTagged(Node* a) {
+ return NewNode(simplified()->ChangeUint32ToTagged(), a);
+ }
+ Node* ChangeFloat64ToTagged(Node* a) {
+ return NewNode(simplified()->ChangeFloat64ToTagged(), a);
+ }
+ Node* ChangeBoolToBit(Node* a) {
+ return NewNode(simplified()->ChangeBoolToBit(), a);
+ }
+ Node* ChangeBitToBool(Node* a) {
+ return NewNode(simplified()->ChangeBitToBool(), a);
+ }
+
+ Node* LoadField(const FieldAccess& access, Node* object) {
+ return NewNode(simplified()->LoadField(access), object);
+ }
+ Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
+ return NewNode(simplified()->StoreField(access), object, value);
+ }
+ Node* LoadElement(const ElementAccess& access, Node* object, Node* index,
+ Node* length) {
+ return NewNode(simplified()->LoadElement(access), object, index, length);
+ }
+ Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
+ Node* length, Node* value) {
+ return NewNode(simplified()->StoreElement(access), object, index, length,
+ value);
+ }
+
+ protected:
+ virtual Node* MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs) FINAL;
+
private:
+ Node* effect_;
+ Node* return_;
+ CommonOperatorBuilder* common_;
MachineOperatorBuilder* machine_;
SimplifiedOperatorBuilder* simplified_;
};
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
new file mode 100644
index 0000000000..dd96499a35
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/basic-block-profiler.h"
+#include "src/compiler/generic-node-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+class BasicBlockProfilerTest : public RawMachineAssemblerTester<int32_t> {
+ public:
+ BasicBlockProfilerTest() : RawMachineAssemblerTester<int32_t>(kMachInt32) {
+ FLAG_turbo_profiling = true;
+ }
+
+ void ResetCounts() { isolate()->basic_block_profiler()->ResetCounts(); }
+
+ void Expect(size_t size, uint32_t* expected) {
+ CHECK_NE(NULL, isolate()->basic_block_profiler());
+ const BasicBlockProfiler::DataList* l =
+ isolate()->basic_block_profiler()->data_list();
+ CHECK_NE(0, static_cast<int>(l->size()));
+ const BasicBlockProfiler::Data* data = l->back();
+ CHECK_EQ(static_cast<int>(size), static_cast<int>(data->n_blocks()));
+ const uint32_t* counts = data->counts();
+ for (size_t i = 0; i < size; ++i) {
+ CHECK_EQ(static_cast<int>(expected[i]), static_cast<int>(counts[i]));
+ }
+ }
+};
+
+
+TEST(ProfileDiamond) {
+ BasicBlockProfilerTest m;
+
+ MLabel blocka, blockb, end;
+ m.Branch(m.Parameter(0), &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Goto(&end);
+ m.Bind(&blockb);
+ m.Goto(&end);
+ m.Bind(&end);
+ m.Return(m.Int32Constant(0));
+
+ m.GenerateCode();
+ {
+ uint32_t expected[] = {0, 0, 0, 0};
+ m.Expect(arraysize(expected), expected);
+ }
+
+ m.Call(0);
+ {
+ uint32_t expected[] = {1, 1, 0, 1};
+ m.Expect(arraysize(expected), expected);
+ }
+
+ m.ResetCounts();
+
+ m.Call(1);
+ {
+ uint32_t expected[] = {1, 0, 1, 1};
+ m.Expect(arraysize(expected), expected);
+ }
+
+ m.Call(0);
+ {
+ uint32_t expected[] = {2, 1, 1, 2};
+ m.Expect(arraysize(expected), expected);
+ }
+}
+
+
+TEST(ProfileLoop) {
+ BasicBlockProfilerTest m;
+
+ MLabel header, body, end;
+ Node* one = m.Int32Constant(1);
+ m.Goto(&header);
+
+ m.Bind(&header);
+ Node* count = m.Phi(kMachInt32, m.Parameter(0), one);
+ m.Branch(count, &body, &end);
+
+ m.Bind(&body);
+ count->ReplaceInput(1, m.Int32Sub(count, one));
+ m.Goto(&header);
+
+ m.Bind(&end);
+ m.Return(one);
+
+ m.GenerateCode();
+ {
+ uint32_t expected[] = {0, 0, 0, 0};
+ m.Expect(arraysize(expected), expected);
+ }
+
+ uint32_t runs[] = {0, 1, 500, 10000};
+ for (size_t i = 0; i < arraysize(runs); i++) {
+ m.ResetCounts();
+ CHECK_EQ(1, m.Call(static_cast<int>(runs[i])));
+ uint32_t expected[] = {1, runs[i] + 1, runs[i], 1};
+ m.Expect(arraysize(expected), expected);
+ }
+}
+
+#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index 61dffdca87..52590c0453 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/compiler/generic-node-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -23,7 +24,7 @@ static IrOpcode::Value int32cmp_opcodes[] = {
TEST(BranchCombineWord32EqualZero_1) {
// Test combining a branch with x == 0
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int32_t eq_constant = -1033;
int32_t ne_constant = 825118;
Node* p0 = m.Parameter(0);
@@ -49,7 +50,7 @@ TEST(BranchCombineWord32EqualZero_chain) {
int32_t ne_constant = 815118;
for (int k = 0; k < 6; k++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
Node* p0 = m.Parameter(0);
MLabel blocka, blockb;
Node* cond = p0;
@@ -74,7 +75,7 @@ TEST(BranchCombineWord32EqualZero_chain) {
TEST(BranchCombineInt32LessThanZero_1) {
// Test combining a branch with x < 0
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int32_t eq_constant = -1433;
int32_t ne_constant = 845118;
Node* p0 = m.Parameter(0);
@@ -96,7 +97,7 @@ TEST(BranchCombineInt32LessThanZero_1) {
TEST(BranchCombineUint32LessThan100_1) {
// Test combining a branch with x < 100
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
int32_t eq_constant = 1471;
int32_t ne_constant = 88845718;
Node* p0 = m.Parameter(0);
@@ -118,7 +119,7 @@ TEST(BranchCombineUint32LessThan100_1) {
TEST(BranchCombineUint32LessThanOrEqual100_1) {
// Test combining a branch with x <= 100
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
int32_t eq_constant = 1479;
int32_t ne_constant = 77845719;
Node* p0 = m.Parameter(0);
@@ -140,7 +141,7 @@ TEST(BranchCombineUint32LessThanOrEqual100_1) {
TEST(BranchCombineZeroLessThanInt32_1) {
// Test combining a branch with 0 < x
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int32_t eq_constant = -2033;
int32_t ne_constant = 225118;
Node* p0 = m.Parameter(0);
@@ -162,7 +163,7 @@ TEST(BranchCombineZeroLessThanInt32_1) {
TEST(BranchCombineInt32GreaterThanZero_1) {
// Test combining a branch with x > 0
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int32_t eq_constant = -1073;
int32_t ne_constant = 825178;
Node* p0 = m.Parameter(0);
@@ -184,7 +185,7 @@ TEST(BranchCombineInt32GreaterThanZero_1) {
TEST(BranchCombineWord32EqualP) {
// Test combining a branch with an Word32Equal.
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
int32_t eq_constant = -1035;
int32_t ne_constant = 825018;
Node* p0 = m.Parameter(0);
@@ -214,7 +215,7 @@ TEST(BranchCombineWord32EqualI) {
for (int left = 0; left < 2; left++) {
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int32_t a = *i;
Node* p0 = m.Int32Constant(a);
@@ -243,7 +244,7 @@ TEST(BranchCombineInt32CmpP) {
int32_t ne_constant = 725018;
for (int op = 0; op < 2; op++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
@@ -275,7 +276,7 @@ TEST(BranchCombineInt32CmpI) {
for (int op = 0; op < 2; op++) {
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int32_t a = *i;
Node* p0 = m.Int32Constant(a);
Node* p1 = m.Parameter(0);
@@ -360,7 +361,7 @@ class CmpBranchGen : public BinopGen<int32_t> {
TEST(BranchCombineInt32CmpAllInputShapes_materialized) {
- for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+ for (size_t i = 0; i < arraysize(int32cmp_opcodes); i++) {
CmpMaterializeBoolGen gen(int32cmp_opcodes[i], false);
Int32BinopInputShapeTester tester(&gen);
tester.TestAllInputShapes();
@@ -369,7 +370,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_materialized) {
TEST(BranchCombineInt32CmpAllInputShapes_inverted_materialized) {
- for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+ for (size_t i = 0; i < arraysize(int32cmp_opcodes); i++) {
CmpMaterializeBoolGen gen(int32cmp_opcodes[i], true);
Int32BinopInputShapeTester tester(&gen);
tester.TestAllInputShapes();
@@ -378,7 +379,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_inverted_materialized) {
TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
- for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
+ for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
CmpBranchGen gen(int32cmp_opcodes[i], false, false, 995 + i, -1011 - i);
Int32BinopInputShapeTester tester(&gen);
tester.TestAllInputShapes();
@@ -387,7 +388,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
- for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
+ for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
CmpBranchGen gen(int32cmp_opcodes[i], false, true, 795 + i, -2011 - i);
Int32BinopInputShapeTester tester(&gen);
tester.TestAllInputShapes();
@@ -396,7 +397,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
- for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
+ for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
CmpBranchGen gen(int32cmp_opcodes[i], true, false, 695 + i, -3011 - i);
Int32BinopInputShapeTester tester(&gen);
tester.TestAllInputShapes();
@@ -405,7 +406,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_false) {
- for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
+ for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
CmpBranchGen gen(int32cmp_opcodes[i], true, true, 595 + i, -4011 - i);
Int32BinopInputShapeTester tester(&gen);
tester.TestAllInputShapes();
@@ -428,12 +429,12 @@ TEST(BranchCombineFloat64Compares) {
CompareWrapper(IrOpcode::kFloat64LessThan),
CompareWrapper(IrOpcode::kFloat64LessThanOrEqual)};
- for (size_t c = 0; c < ARRAY_SIZE(cmps); c++) {
+ for (size_t c = 0; c < arraysize(cmps); c++) {
CompareWrapper cmp = cmps[c];
for (int invert = 0; invert < 2; invert++) {
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+ Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+ Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
MLabel blocka, blockb;
Node* cond = cmp.MakeNode(&m, a, b);
@@ -444,8 +445,8 @@ TEST(BranchCombineFloat64Compares) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- for (size_t i = 0; i < ARRAY_SIZE(inputs); i++) {
- for (size_t j = 0; j < ARRAY_SIZE(inputs); j += 2) {
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j += 2) {
input_a = inputs[i];
input_b = inputs[i];
int32_t expected =
diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
index 148f4b34f5..06308a0b50 100644
--- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
@@ -4,15 +4,16 @@
#include <limits>
+#include "src/compiler/change-lowering.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-node-factory.h"
#include "src/compiler/typer.h"
#include "src/compiler/verifier.h"
#include "src/execution.h"
+#include "src/globals.h"
#include "src/parser.h"
#include "src/rewriter.h"
#include "src/scopes.h"
@@ -27,18 +28,17 @@ using namespace v8::internal::compiler;
template <typename ReturnType>
class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
public:
- explicit ChangesLoweringTester(MachineType p0 = kMachineLast)
+ explicit ChangesLoweringTester(MachineType p0 = kMachNone)
: GraphBuilderTester<ReturnType>(p0),
typer(this->zone()),
- source_positions(this->graph()),
- jsgraph(this->graph(), this->common(), &typer),
- lowering(&jsgraph, &source_positions),
+ javascript(this->zone()),
+ jsgraph(this->graph(), this->common(), &javascript, &typer,
+ this->machine()),
function(Handle<JSFunction>::null()) {}
Typer typer;
- SourcePositionTable source_positions;
+ JSOperatorBuilder javascript;
JSGraph jsgraph;
- SimplifiedLowering lowering;
Handle<JSFunction> function;
Node* start() { return this->graph()->start(); }
@@ -55,12 +55,13 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
"(function() { 'use strict'; return 2.7123; })")));
CompilationInfoWithZone info(function);
CHECK(Parser::Parse(&info));
- StrictMode strict_mode = info.function()->strict_mode();
- info.SetStrictMode(strict_mode);
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
CHECK(Rewriter::Rewrite(&info));
CHECK(Scope::Analyze(&info));
CHECK_NE(NULL, info.scope());
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info.scope(), info.zone());
+ info.shared_info()->set_scope_info(*scope_info);
Pipeline pipeline(&info);
Linkage linkage(&info);
Handle<Code> code =
@@ -77,29 +78,29 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
void StoreFloat64(Node* node, double* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- this->Store(kMachineFloat64, ptr_node, node);
+ this->Store(kMachFloat64, ptr_node, node);
}
Node* LoadInt32(int32_t* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(kMachineWord32, ptr_node);
+ return this->Load(kMachInt32, ptr_node);
}
Node* LoadUint32(uint32_t* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(kMachineWord32, ptr_node);
+ return this->Load(kMachUint32, ptr_node);
}
Node* LoadFloat64(double* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(kMachineFloat64, ptr_node);
+ return this->Load(kMachFloat64, ptr_node);
}
void CheckNumber(double expected, Object* number) {
CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
}
- void BuildAndLower(Operator* op) {
+ void BuildAndLower(const Operator* op) {
// We build a graph by hand here, because the raw machine assembler
// does not add the correct control and effect nodes.
Node* p0 = this->Parameter(0);
@@ -108,11 +109,11 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
this->start(), this->start());
Node* end = this->graph()->NewNode(this->common()->End(), ret);
this->graph()->SetEnd(end);
- this->lowering.LowerChange(change, this->start(), this->start());
- Verifier::Run(this->graph());
+ LowerChange(change);
}
- void BuildStoreAndLower(Operator* op, Operator* store_op, void* location) {
+ void BuildStoreAndLower(const Operator* op, const Operator* store_op,
+ void* location) {
// We build a graph by hand here, because the raw machine assembler
// does not add the correct control and effect nodes.
Node* p0 = this->Parameter(0);
@@ -124,11 +125,11 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
this->common()->Return(), this->Int32Constant(0), store, this->start());
Node* end = this->graph()->NewNode(this->common()->End(), ret);
this->graph()->SetEnd(end);
- this->lowering.LowerChange(change, this->start(), this->start());
- Verifier::Run(this->graph());
+ LowerChange(change);
}
- void BuildLoadAndLower(Operator* op, Operator* load_op, void* location) {
+ void BuildLoadAndLower(const Operator* op, const Operator* load_op,
+ void* location) {
// We build a graph by hand here, because the raw machine assembler
// does not add the correct control and effect nodes.
Node* load =
@@ -139,7 +140,17 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
this->start(), this->start());
Node* end = this->graph()->NewNode(this->common()->End(), ret);
this->graph()->SetEnd(end);
- this->lowering.LowerChange(change, this->start(), this->start());
+ LowerChange(change);
+ }
+
+ void LowerChange(Node* change) {
+ // Run the graph reducer with changes lowering on a single node.
+ CompilationInfo info(this->isolate(), this->zone());
+ Linkage linkage(&info);
+ ChangeLowering lowering(&jsgraph, &linkage);
+ GraphReducer reducer(this->graph());
+ reducer.AddReducer(&lowering);
+ reducer.ReduceNode(change);
Verifier::Run(this->graph());
}
@@ -150,7 +161,7 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
TEST(RunChangeTaggedToInt32) {
// Build and lower a graph by hand.
- ChangesLoweringTester<int32_t> t(kMachineTagged);
+ ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
if (Pipeline::SupportedTarget()) {
@@ -180,7 +191,7 @@ TEST(RunChangeTaggedToInt32) {
TEST(RunChangeTaggedToUint32) {
// Build and lower a graph by hand.
- ChangesLoweringTester<uint32_t> t(kMachineTagged);
+ ChangesLoweringTester<uint32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
if (Pipeline::SupportedTarget()) {
@@ -209,12 +220,13 @@ TEST(RunChangeTaggedToUint32) {
TEST(RunChangeTaggedToFloat64) {
- ChangesLoweringTester<int32_t> t(kMachineTagged);
+ ChangesLoweringTester<int32_t> t(kMachAnyTagged);
double result;
- t.BuildStoreAndLower(t.simplified()->ChangeTaggedToFloat64(),
- t.machine()->Store(kMachineFloat64, kNoWriteBarrier),
- &result);
+ t.BuildStoreAndLower(
+ t.simplified()->ChangeTaggedToFloat64(),
+ t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+ &result);
if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
@@ -259,7 +271,7 @@ TEST(RunChangeTaggedToFloat64) {
TEST(RunChangeBoolToBit) {
- ChangesLoweringTester<int32_t> t(kMachineTagged);
+ ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeBoolToBit());
if (Pipeline::SupportedTarget()) {
@@ -277,7 +289,7 @@ TEST(RunChangeBoolToBit) {
TEST(RunChangeBitToBool) {
- ChangesLoweringTester<Object*> t(kMachineWord32);
+ ChangesLoweringTester<Object*> t(kMachInt32);
t.BuildAndLower(t.simplified()->ChangeBitToBool());
if (Pipeline::SupportedTarget()) {
@@ -294,73 +306,82 @@ TEST(RunChangeBitToBool) {
}
-bool TODO_INT32_TO_TAGGED_WILL_WORK(int32_t v) {
- // TODO(titzer): enable all UI32 -> Tagged checking when inline allocation
- // works.
- return Smi::IsValid(v);
-}
-
-
-bool TODO_UINT32_TO_TAGGED_WILL_WORK(uint32_t v) {
- // TODO(titzer): enable all UI32 -> Tagged checking when inline allocation
- // works.
- return v <= static_cast<uint32_t>(Smi::kMaxValue);
-}
-
+#if V8_TURBOFAN_BACKEND
+// TODO(titzer): disabled on ARM
-TEST(RunChangeInt32ToTagged) {
+TEST(RunChangeInt32ToTaggedSmi) {
ChangesLoweringTester<Object*> t;
int32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
- t.machine()->Load(kMachineWord32), &input);
+ t.machine()->Load(kMachInt32), &input);
if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
input = *i;
- Object* result = t.CallWithPotentialGC<Object>();
- if (TODO_INT32_TO_TAGGED_WILL_WORK(input)) {
- t.CheckNumber(static_cast<double>(input), result);
- }
- }
- }
-
- if (Pipeline::SupportedTarget()) {
- FOR_INT32_INPUTS(i) {
- input = *i;
- SimulateFullSpace(CcTest::heap()->new_space());
- Object* result = t.CallWithPotentialGC<Object>();
- if (TODO_INT32_TO_TAGGED_WILL_WORK(input)) {
- t.CheckNumber(static_cast<double>(input), result);
- }
+ if (!Smi::IsValid(input)) continue;
+ Object* result = t.Call();
+ t.CheckNumber(static_cast<double>(input), result);
}
}
}
-TEST(RunChangeUint32ToTagged) {
+TEST(RunChangeUint32ToTaggedSmi) {
ChangesLoweringTester<Object*> t;
uint32_t input;
t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
- t.machine()->Load(kMachineWord32), &input);
+ t.machine()->Load(kMachUint32), &input);
if (Pipeline::SupportedTarget()) {
FOR_UINT32_INPUTS(i) {
input = *i;
- Object* result = t.CallWithPotentialGC<Object>();
+ if (input > static_cast<uint32_t>(Smi::kMaxValue)) continue;
+ Object* result = t.Call();
double expected = static_cast<double>(input);
- if (TODO_UINT32_TO_TAGGED_WILL_WORK(input)) {
- t.CheckNumber(expected, result);
+ t.CheckNumber(expected, result);
+ }
+ }
+}
+
+
+TEST(RunChangeInt32ToTagged) {
+ ChangesLoweringTester<Object*> t;
+ int32_t input;
+ t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
+ t.machine()->Load(kMachInt32), &input);
+
+ if (Pipeline::SupportedTarget()) {
+ for (int m = 0; m < 3; m++) { // Try 3 GC modes.
+ FOR_INT32_INPUTS(i) {
+ if (m == 0) CcTest::heap()->EnableInlineAllocation();
+ if (m == 1) CcTest::heap()->DisableInlineAllocation();
+ if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
+
+ input = *i;
+ Object* result = t.CallWithPotentialGC<Object>();
+ t.CheckNumber(static_cast<double>(input), result);
}
}
}
+}
+
+
+TEST(RunChangeUint32ToTagged) {
+ ChangesLoweringTester<Object*> t;
+ uint32_t input;
+ t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
+ t.machine()->Load(kMachUint32), &input);
if (Pipeline::SupportedTarget()) {
- FOR_UINT32_INPUTS(i) {
- input = *i;
- SimulateFullSpace(CcTest::heap()->new_space());
- Object* result = t.CallWithPotentialGC<Object>();
- double expected = static_cast<double>(static_cast<uint32_t>(input));
- if (TODO_UINT32_TO_TAGGED_WILL_WORK(input)) {
+ for (int m = 0; m < 3; m++) { // Try 3 GC modes.
+ FOR_UINT32_INPUTS(i) {
+ if (m == 0) CcTest::heap()->EnableInlineAllocation();
+ if (m == 1) CcTest::heap()->DisableInlineAllocation();
+ if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
+
+ input = *i;
+ Object* result = t.CallWithPotentialGC<Object>();
+ double expected = static_cast<double>(input);
t.CheckNumber(expected, result);
}
}
@@ -368,30 +389,25 @@ TEST(RunChangeUint32ToTagged) {
}
-// TODO(titzer): lowering of Float64->Tagged needs inline allocation.
-#define TODO_FLOAT64_TO_TAGGED false
-
TEST(RunChangeFloat64ToTagged) {
ChangesLoweringTester<Object*> t;
double input;
t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(),
- t.machine()->Load(kMachineFloat64), &input);
+ t.machine()->Load(kMachFloat64), &input);
- // TODO(titzer): need inline allocation to change float to tagged.
- if (TODO_FLOAT64_TO_TAGGED && Pipeline::SupportedTarget()) {
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- Object* result = t.CallWithPotentialGC<Object>();
- t.CheckNumber(input, result);
- }
- }
-
- if (TODO_FLOAT64_TO_TAGGED && Pipeline::SupportedTarget()) {
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- SimulateFullSpace(CcTest::heap()->new_space());
- Object* result = t.CallWithPotentialGC<Object>();
- t.CheckNumber(input, result);
+ if (Pipeline::SupportedTarget()) {
+ for (int m = 0; m < 3; m++) { // Try 3 GC modes.
+ FOR_FLOAT64_INPUTS(i) {
+ if (m == 0) CcTest::heap()->EnableInlineAllocation();
+ if (m == 1) CcTest::heap()->DisableInlineAllocation();
+ if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
+
+ input = *i;
+ Object* result = t.CallWithPotentialGC<Object>();
+ t.CheckNumber(input, result);
+ }
}
}
}
+
+#endif // V8_TURBOFAN_BACKEND
diff --git a/deps/v8/test/cctest/compiler/test-codegen-deopt.cc b/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
index b953ee53cc..8217229bd5 100644
--- a/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
@@ -20,6 +20,7 @@
#include "src/parser.h"
#include "src/rewriter.h"
+#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/compiler/function-tester.h"
using namespace v8::internal;
@@ -44,14 +45,10 @@ class DeoptCodegenTester {
info(function, scope->main_zone()),
bailout_id(-1) {
CHECK(Parser::Parse(&info));
- StrictMode strict_mode = info.function()->strict_mode();
- info.SetStrictMode(strict_mode);
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
CHECK(Rewriter::Rewrite(&info));
CHECK(Scope::Analyze(&info));
- CHECK_NE(NULL, info.scope());
-
- FunctionTester::EnsureDeoptimizationSupport(&info);
+ CHECK(Compiler::EnsureDeoptimizationSupport(&info));
DCHECK(info.shared_info()->has_deoptimization_support());
@@ -62,7 +59,9 @@ class DeoptCodegenTester {
void GenerateCodeFromSchedule(Schedule* schedule) {
OFStream os(stdout);
- os << *schedule;
+ if (FLAG_trace_turbo) {
+ os << *schedule;
+ }
// Initialize the codegen and generate code.
Linkage* linkage = new (scope_->main_zone()) Linkage(&info);
@@ -72,20 +71,26 @@ class DeoptCodegenTester {
InstructionSelector selector(code, &source_positions);
selector.SelectInstructions();
- os << "----- Instruction sequence before register allocation -----\n"
- << *code;
+ if (FLAG_trace_turbo) {
+ os << "----- Instruction sequence before register allocation -----\n"
+ << *code;
+ }
RegisterAllocator allocator(code);
CHECK(allocator.Allocate());
- os << "----- Instruction sequence after register allocation -----\n"
- << *code;
+ if (FLAG_trace_turbo) {
+ os << "----- Instruction sequence after register allocation -----\n"
+ << *code;
+ }
compiler::CodeGenerator generator(code);
result_code = generator.GenerateCode();
-#ifdef DEBUG
- result_code->Print();
+#ifdef OBJECT_PRINT
+ if (FLAG_print_opt_code || FLAG_trace_turbo) {
+ result_code->Print();
+ }
#endif
}
@@ -112,7 +117,6 @@ class TrivialDeoptCodegenTester : public DeoptCodegenTester {
}
Schedule* BuildGraphAndSchedule(Graph* graph) {
- Isolate* isolate = info.isolate();
CommonOperatorBuilder common(zone());
// Manually construct a schedule for the function below:
@@ -120,49 +124,42 @@ class TrivialDeoptCodegenTester : public DeoptCodegenTester {
// deopt();
// }
- MachineType parameter_reps[] = {kMachineTagged};
- MachineCallDescriptorBuilder descriptor_builder(kMachineTagged, 1,
- parameter_reps);
-
- RawMachineAssembler m(graph, &descriptor_builder);
-
- Handle<Object> undef_object =
- Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> undef_constant =
- PrintableUnique<Object>::CreateUninitialized(zone(), undef_object);
- Node* undef_node = m.NewNode(common.HeapConstant(undef_constant));
+ CSignature1<Object*, Object*> sig;
+ RawMachineAssembler m(graph, &sig);
Handle<JSFunction> deopt_function =
NewFunction("function deopt() { %DeoptimizeFunction(foo); }; deopt");
- PrintableUnique<Object> deopt_fun_constant =
- PrintableUnique<Object>::CreateUninitialized(zone(), deopt_function);
+ Unique<Object> deopt_fun_constant =
+ Unique<Object>::CreateUninitialized(deopt_function);
Node* deopt_fun_node = m.NewNode(common.HeapConstant(deopt_fun_constant));
- MLabel deopt, cont;
- Node* call = m.CallJS0(deopt_fun_node, undef_node, &cont, &deopt);
-
- m.Bind(&cont);
- m.NewNode(common.Continuation(), call);
- m.Return(undef_node);
-
- m.Bind(&deopt);
- m.NewNode(common.LazyDeoptimization(), call);
+ Handle<Context> caller_context(function->context(), CcTest::i_isolate());
+ Unique<Object> caller_context_constant =
+ Unique<Object>::CreateUninitialized(caller_context);
+ Node* caller_context_node =
+ m.NewNode(common.HeapConstant(caller_context_constant));
bailout_id = GetCallBailoutId();
- Node* parameters = m.NewNode(common.StateValues(1), undef_node);
+ Node* parameters = m.NewNode(common.StateValues(1), m.UndefinedConstant());
Node* locals = m.NewNode(common.StateValues(0));
Node* stack = m.NewNode(common.StateValues(0));
- Node* state_node =
- m.NewNode(common.FrameState(bailout_id), parameters, locals, stack);
- m.Deoptimize(state_node);
+ Node* state_node = m.NewNode(
+ common.FrameState(JS_FRAME, bailout_id, kIgnoreOutput), parameters,
+ locals, stack, caller_context_node, m.UndefinedConstant());
+
+ Handle<Context> context(deopt_function->context(), CcTest::i_isolate());
+ Unique<Object> context_constant =
+ Unique<Object>::CreateUninitialized(context);
+ Node* context_node = m.NewNode(common.HeapConstant(context_constant));
+
+ m.CallJS0(deopt_fun_node, m.UndefinedConstant(), context_node, state_node);
+
+ m.Return(m.UndefinedConstant());
// Schedule the graph:
Schedule* schedule = m.Export();
- cont_block = cont.block();
- deopt_block = deopt.block();
-
return schedule;
}
@@ -177,9 +174,6 @@ class TrivialDeoptCodegenTester : public DeoptCodegenTester {
CHECK(false);
return BailoutId(-1);
}
-
- BasicBlock* cont_block;
- BasicBlock* deopt_block;
};
@@ -196,18 +190,10 @@ TEST(TurboTrivialDeoptCodegen) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(t.result_code->deoptimization_data());
- Label* cont_label = t.code->GetLabel(t.cont_block);
- Label* deopt_label = t.code->GetLabel(t.deopt_block);
-
- // Check the patch table. It should patch the continuation address to the
- // deoptimization block address.
- CHECK_EQ(1, data->ReturnAddressPatchCount());
- CHECK_EQ(cont_label->pos(), data->ReturnAddressPc(0)->value());
- CHECK_EQ(deopt_label->pos(), data->PatchedAddressPc(0)->value());
+ // TODO(jarin) Find a way to test the safepoint.
// Check that we deoptimize to the right AST id.
CHECK_EQ(1, data->DeoptCount());
- CHECK_EQ(1, data->DeoptCount());
CHECK_EQ(t.bailout_id.ToInt(), data->AstId(0).ToInt());
}
@@ -248,7 +234,6 @@ class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
}
Schedule* BuildGraphAndSchedule(Graph* graph) {
- Isolate* isolate = info.isolate();
CommonOperatorBuilder common(zone());
// Manually construct a schedule for the function below:
@@ -256,48 +241,35 @@ class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
// %DeoptimizeFunction(foo);
// }
- MachineType parameter_reps[] = {kMachineTagged};
- MachineCallDescriptorBuilder descriptor_builder(kMachineTagged, 2,
- parameter_reps);
-
- RawMachineAssembler m(graph, &descriptor_builder);
-
- Handle<Object> undef_object =
- Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> undef_constant =
- PrintableUnique<Object>::CreateUninitialized(zone(), undef_object);
- Node* undef_node = m.NewNode(common.HeapConstant(undef_constant));
+ CSignature1<Object*, Object*> sig;
+ RawMachineAssembler m(graph, &sig);
- PrintableUnique<Object> this_fun_constant =
- PrintableUnique<Object>::CreateUninitialized(zone(), function);
+ Unique<Object> this_fun_constant =
+ Unique<Object>::CreateUninitialized(function);
Node* this_fun_node = m.NewNode(common.HeapConstant(this_fun_constant));
- MLabel deopt, cont;
- Node* call = m.CallRuntime1(Runtime::kDeoptimizeFunction, this_fun_node,
- &cont, &deopt);
-
- m.Bind(&cont);
- m.NewNode(common.Continuation(), call);
- m.Return(undef_node);
-
- m.Bind(&deopt);
- m.NewNode(common.LazyDeoptimization(), call);
+ Handle<Context> context(function->context(), CcTest::i_isolate());
+ Unique<Object> context_constant =
+ Unique<Object>::CreateUninitialized(context);
+ Node* context_node = m.NewNode(common.HeapConstant(context_constant));
bailout_id = GetCallBailoutId();
- Node* parameters = m.NewNode(common.StateValues(1), undef_node);
+ Node* parameters = m.NewNode(common.StateValues(1), m.UndefinedConstant());
Node* locals = m.NewNode(common.StateValues(0));
Node* stack = m.NewNode(common.StateValues(0));
- Node* state_node =
- m.NewNode(common.FrameState(bailout_id), parameters, locals, stack);
- m.Deoptimize(state_node);
+ Node* state_node = m.NewNode(
+ common.FrameState(JS_FRAME, bailout_id, kIgnoreOutput), parameters,
+ locals, stack, context_node, m.UndefinedConstant());
+
+ m.CallRuntime1(Runtime::kDeoptimizeFunction, this_fun_node, context_node,
+ state_node);
+
+ m.Return(m.UndefinedConstant());
// Schedule the graph:
Schedule* schedule = m.Export();
- cont_block = cont.block();
- deopt_block = deopt.block();
-
return schedule;
}
@@ -312,9 +284,6 @@ class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
CHECK(false);
return BailoutId(-1);
}
-
- BasicBlock* cont_block;
- BasicBlock* deopt_block;
};
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index 00c220945d..6239f2a406 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -77,14 +77,14 @@ class InterpreterState {
class MoveInterpreter : public GapResolver::Assembler {
public:
virtual void AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) V8_OVERRIDE {
+ InstructionOperand* destination) OVERRIDE {
InterpreterState::Moves moves;
moves.push_back(MoveOperands(source, destination));
state_.ExecuteInParallel(moves);
}
virtual void AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) V8_OVERRIDE {
+ InstructionOperand* destination) OVERRIDE {
InterpreterState::Moves moves;
moves.push_back(MoveOperands(source, destination));
moves.push_back(MoveOperands(destination, source));
diff --git a/deps/v8/test/cctest/compiler/test-graph-reducer.cc b/deps/v8/test/cctest/compiler/test-graph-reducer.cc
index 189b3db18e..b94ca45855 100644
--- a/deps/v8/test/cctest/compiler/test-graph-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-graph-reducer.cc
@@ -102,10 +102,10 @@ class InPlaceBCReducer : public Reducer {
// Wraps all "OPA0" nodes in "OPB1" operators by allocating new nodes.
-class A0Wrapper V8_FINAL : public Reducer {
+class A0Wrapper FINAL : public Reducer {
public:
explicit A0Wrapper(Graph* graph) : graph_(graph) {}
- virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+ virtual Reduction Reduce(Node* node) OVERRIDE {
switch (node->op()->opcode()) {
case OPCODE_A0:
CHECK_EQ(0, node->InputCount());
@@ -118,10 +118,10 @@ class A0Wrapper V8_FINAL : public Reducer {
// Wraps all "OPB0" nodes in two "OPC1" operators by allocating new nodes.
-class B0Wrapper V8_FINAL : public Reducer {
+class B0Wrapper FINAL : public Reducer {
public:
explicit B0Wrapper(Graph* graph) : graph_(graph) {}
- virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+ virtual Reduction Reduce(Node* node) OVERRIDE {
switch (node->op()->opcode()) {
case OPCODE_B0:
CHECK_EQ(0, node->InputCount());
@@ -470,9 +470,9 @@ TEST(ReduceForward1) {
reducer.ReduceGraph();
CHECK_EQ(before, graph.NodeCount());
CHECK_EQ(&OPB0, n1->op());
- CHECK_EQ(&OPB1, n2->op());
+ CHECK(n2->IsDead());
CHECK_EQ(n1, end->InputAt(0));
- CHECK_EQ(&OPB1, n3->op());
+ CHECK(n3->IsDead());
CHECK_EQ(n1, end->InputAt(0));
CHECK_EQ(&OPB2, end->op());
CHECK_EQ(0, n2->UseCount());
@@ -621,41 +621,3 @@ TEST(Order) {
}
}
}
-
-
-// Tests that a reducer is only applied once.
-class OneTimeReducer : public Reducer {
- public:
- OneTimeReducer(Reducer* reducer, Zone* zone)
- : reducer_(reducer),
- nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
- virtual Reduction Reduce(Node* node) {
- CHECK_EQ(0, static_cast<int>(nodes_.count(node)));
- nodes_.insert(node);
- return reducer_->Reduce(node);
- }
- Reducer* reducer_;
- NodeSet nodes_;
-};
-
-
-TEST(OneTimeReduce1) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* end = graph.NewNode(&OPA1, n1);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph);
- InPlaceABReducer r;
- OneTimeReducer once(&r, graph.zone());
- reducer.AddReducer(&once);
-
- // Tests A* => B* with in-place updates. Should only be applied once.
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPB0, n1->op());
- CHECK_EQ(&OPB1, end->op());
- CHECK_EQ(n1, end->InputAt(0));
-}
diff --git a/deps/v8/test/cctest/compiler/test-instruction-selector-arm.cc b/deps/v8/test/cctest/compiler/test-instruction-selector-arm.cc
deleted file mode 100644
index f62e09f978..0000000000
--- a/deps/v8/test/cctest/compiler/test-instruction-selector-arm.cc
+++ /dev/null
@@ -1,1863 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <list>
-
-#include "test/cctest/compiler/instruction-selector-tester.h"
-#include "test/cctest/compiler/value-helper.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-namespace {
-
-typedef RawMachineAssembler::Label MLabel;
-
-struct DPI {
- Operator* op;
- ArchOpcode arch_opcode;
- ArchOpcode reverse_arch_opcode;
- ArchOpcode test_arch_opcode;
-};
-
-
-// ARM data processing instructions.
-class DPIs V8_FINAL : public std::list<DPI>, private HandleAndZoneScope {
- public:
- DPIs() {
- MachineOperatorBuilder machine(main_zone());
- DPI and_ = {machine.Word32And(), kArmAnd, kArmAnd, kArmTst};
- push_back(and_);
- DPI or_ = {machine.Word32Or(), kArmOrr, kArmOrr, kArmOrr};
- push_back(or_);
- DPI xor_ = {machine.Word32Xor(), kArmEor, kArmEor, kArmTeq};
- push_back(xor_);
- DPI add = {machine.Int32Add(), kArmAdd, kArmAdd, kArmCmn};
- push_back(add);
- DPI sub = {machine.Int32Sub(), kArmSub, kArmRsb, kArmCmp};
- push_back(sub);
- }
-};
-
-
-struct ODPI {
- Operator* op;
- ArchOpcode arch_opcode;
- ArchOpcode reverse_arch_opcode;
-};
-
-
-// ARM data processing instructions with overflow.
-class ODPIs V8_FINAL : public std::list<ODPI>, private HandleAndZoneScope {
- public:
- ODPIs() {
- MachineOperatorBuilder machine(main_zone());
- ODPI add = {machine.Int32AddWithOverflow(), kArmAdd, kArmAdd};
- push_back(add);
- ODPI sub = {machine.Int32SubWithOverflow(), kArmSub, kArmRsb};
- push_back(sub);
- }
-};
-
-
-// ARM immediates.
-class Immediates V8_FINAL : public std::list<int32_t> {
- public:
- Immediates() {
- for (uint32_t imm8 = 0; imm8 < 256; ++imm8) {
- for (uint32_t rot4 = 0; rot4 < 32; rot4 += 2) {
- int32_t imm = (imm8 >> rot4) | (imm8 << (32 - rot4));
- CHECK(Assembler::ImmediateFitsAddrMode1Instruction(imm));
- push_back(imm);
- }
- }
- }
-};
-
-
-struct Shift {
- Operator* op;
- int32_t i_low; // lowest possible immediate
- int32_t i_high; // highest possible immediate
- AddressingMode i_mode; // Operand2_R_<shift>_I
- AddressingMode r_mode; // Operand2_R_<shift>_R
-};
-
-
-// ARM shifts.
-class Shifts V8_FINAL : public std::list<Shift>, private HandleAndZoneScope {
- public:
- Shifts() {
- MachineOperatorBuilder machine(main_zone());
- Shift sar = {machine.Word32Sar(), 1, 32, kMode_Operand2_R_ASR_I,
- kMode_Operand2_R_ASR_R};
- Shift shl = {machine.Word32Shl(), 0, 31, kMode_Operand2_R_LSL_I,
- kMode_Operand2_R_LSL_R};
- Shift shr = {machine.Word32Shr(), 1, 32, kMode_Operand2_R_LSR_I,
- kMode_Operand2_R_LSR_R};
- push_back(sar);
- push_back(shl);
- push_back(shr);
- }
-};
-
-} // namespace
-
-
-TEST(InstructionSelectorDPIP) {
- DPIs dpis;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- InstructionSelectorTester m;
- m.Return(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
-}
-
-
-TEST(InstructionSelectorDPIImm) {
- DPIs dpis;
- Immediates immediates;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- for (Immediates::const_iterator j = immediates.begin();
- j != immediates.end(); ++j) {
- int32_t imm = *j;
- {
- InstructionSelectorTester m;
- m.Return(m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorDPIAndShiftP) {
- DPIs dpis;
- Shifts shifts;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
- Shift shift = *j;
- {
- InstructionSelectorTester m;
- m.Return(
- m.NewNode(dpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1), m.Parameter(2))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.NewNode(dpi.op,
- m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
- m.Parameter(2)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorDPIAndRotateRightP) {
- DPIs dpis;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror = m.Word32Or(
- m.Word32Shr(value, shift),
- m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)));
- m.Return(m.NewNode(dpi.op, m.Parameter(0), ror));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror =
- m.Word32Or(m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)),
- m.Word32Shr(value, shift));
- m.Return(m.NewNode(dpi.op, m.Parameter(0), ror));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror = m.Word32Or(
- m.Word32Shr(value, shift),
- m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)));
- m.Return(m.NewNode(dpi.op, ror, m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror =
- m.Word32Or(m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)),
- m.Word32Shr(value, shift));
- m.Return(m.NewNode(dpi.op, ror, m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- }
- }
-}
-
-
-TEST(InstructionSelectorDPIAndShiftImm) {
- DPIs dpis;
- Shifts shifts;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
- Shift shift = *j;
- for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
- {
- InstructionSelectorTester m;
- m.Return(m.NewNode(
- dpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.NewNode(
- dpi.op, m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
- m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- }
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorODPIP) {
- ODPIs odpis;
- for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
- ODPI odpi = *i;
- {
- InstructionSelectorTester m;
- m.Return(
- m.Projection(1, m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(
- m.Projection(0, m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- }
-}
-
-
-TEST(InstructionSelectorODPIImm) {
- ODPIs odpis;
- Immediates immediates;
- for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
- ODPI odpi = *i;
- for (Immediates::const_iterator j = immediates.begin();
- j != immediates.end(); ++j) {
- int32_t imm = *j;
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 1, m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 1, m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 0, m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 0, m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorODPIAndShiftP) {
- ODPIs odpis;
- Shifts shifts;
- for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
- ODPI odpi = *i;
- for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
- Shift shift = *j;
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 1, m.NewNode(odpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 1, m.NewNode(odpi.op,
- m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
- m.Parameter(2))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 0, m.NewNode(odpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 0, m.NewNode(odpi.op,
- m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
- m.Parameter(2))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node =
- m.NewNode(odpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node = m.NewNode(
- odpi.op, m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
- m.Parameter(2));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorODPIAndShiftImm) {
- ODPIs odpis;
- Shifts shifts;
- for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
- ODPI odpi = *i;
- for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
- Shift shift = *j;
- for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(1, m.NewNode(odpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1),
- m.Int32Constant(imm)))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 1, m.NewNode(odpi.op, m.NewNode(shift.op, m.Parameter(0),
- m.Int32Constant(imm)),
- m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(0, m.NewNode(odpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1),
- m.Int32Constant(imm)))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Projection(
- 0, m.NewNode(odpi.op, m.NewNode(shift.op, m.Parameter(0),
- m.Int32Constant(imm)),
- m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_LE(1, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node = m.NewNode(
- odpi.op, m.Parameter(0),
- m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- {
- InstructionSelectorTester m;
- Node* node = m.NewNode(odpi.op, m.NewNode(shift.op, m.Parameter(0),
- m.Int32Constant(imm)),
- m.Parameter(1));
- m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
- m.SelectInstructions();
- CHECK_LE(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_EQ(2, m.code[0]->OutputCount());
- }
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1P) {
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Parameter(0),
- m.Word32Xor(m.Int32Constant(-1), m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Parameter(0),
- m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)),
- m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)),
- m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
-}
-
-
-TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1AndShiftP) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(
- m.Parameter(0),
- m.Word32Xor(m.Int32Constant(-1),
- m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(
- m.Parameter(0),
- m.Word32Xor(m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
- m.Int32Constant(-1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(
- m.Word32Xor(m.Int32Constant(-1),
- m.NewNode(shift.op, m.Parameter(0), m.Parameter(1))),
- m.Parameter(2)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(
- m.Word32Xor(m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
- m.Int32Constant(-1)),
- m.Parameter(2)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32XorWithMinus1P) {
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- }
-}
-
-
-TEST(InstructionSelectorWord32XorWithMinus1AndShiftP) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- {
- InstructionSelectorTester m;
- m.Return(
- m.Word32Xor(m.Int32Constant(-1),
- m.NewNode(shift.op, m.Parameter(0), m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Xor(m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
- m.Int32Constant(-1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- }
- }
-}
-
-
-TEST(InstructionSelectorShiftP) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- InstructionSelectorTester m;
- m.Return(m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMov, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- }
-}
-
-
-TEST(InstructionSelectorShiftImm) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
- InstructionSelectorTester m;
- m.Return(m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMov, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- }
-}
-
-
-TEST(InstructionSelectorRotateRightP) {
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(0);
- Node* shift = m.Parameter(1);
- m.Return(
- m.Word32Or(m.Word32Shr(value, shift),
- m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMov, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- }
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(0);
- Node* shift = m.Parameter(1);
- m.Return(
- m.Word32Or(m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)),
- m.Word32Shr(value, shift)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMov, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- }
-}
-
-
-TEST(InstructionSelectorRotateRightImm) {
- FOR_INPUTS(uint32_t, ror, i) {
- uint32_t shift = *i;
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(0);
- m.Return(m.Word32Or(m.Word32Shr(value, m.Int32Constant(shift)),
- m.Word32Shl(value, m.Int32Constant(32 - shift))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMov, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_I, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(shift, m.ToInt32(m.code[0]->InputAt(1)));
- }
- {
- InstructionSelectorTester m;
- Node* value = m.Parameter(0);
- m.Return(m.Word32Or(m.Word32Shl(value, m.Int32Constant(32 - shift)),
- m.Word32Shr(value, m.Int32Constant(shift))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMov, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_I, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(shift, m.ToInt32(m.code[0]->InputAt(1)));
- }
- }
-}
-
-
-TEST(InstructionSelectorInt32MulP) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMul, m.code[0]->arch_opcode());
-}
-
-
-TEST(InstructionSelectorInt32MulImm) {
- // x * (2^k + 1) -> (x >> k) + x
- for (int k = 1; k < 31; ++k) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
- }
- // (2^k + 1) * x -> (x >> k) + x
- for (int k = 1; k < 31; ++k) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
- }
- // x * (2^k - 1) -> (x >> k) - x
- for (int k = 3; k < 31; ++k) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmRsb, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
- }
- // (2^k - 1) * x -> (x >> k) - x
- for (int k = 3; k < 31; ++k) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmRsb, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
- }
-}
-
-
-TEST(InstructionSelectorWord32AndImm_ARMv7) {
- for (uint32_t width = 1; width <= 32; ++width) {
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Parameter(0),
- m.Int32Constant(0xffffffffu >> (32 - width))));
- m.SelectInstructions(ARMv7);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(0, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
- }
- for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
- for (uint32_t width = 1; width < 32 - lsb; ++width) {
- uint32_t msk = ~((0xffffffffu >> (32 - width)) << lsb);
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(msk)));
- m.SelectInstructions(ARMv7);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmBfc, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK(UnallocatedOperand::cast(m.code[0]->Output())
- ->HasSameAsInputPolicy());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32AndAndWord32ShrImm_ARMv7) {
- for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
- for (uint32_t width = 1; width <= 32 - lsb; ++width) {
- {
- InstructionSelectorTester m;
- m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
- m.Int32Constant(0xffffffffu >> (32 - width))));
- m.SelectInstructions(ARMv7);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
- }
- {
- InstructionSelectorTester m;
- m.Return(
- m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
- m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
- m.SelectInstructions(ARMv7);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32ShrAndWord32AndImm_ARMv7) {
- for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
- for (uint32_t width = 1; width <= 32 - lsb; ++width) {
- uint32_t max = 1 << lsb;
- if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
- uint32_t jnk = CcTest::random_number_generator()->NextInt(max);
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
- m.Int32Constant(lsb)));
- m.SelectInstructions(ARMv7);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
- m.Int32Constant(lsb)));
- m.SelectInstructions(ARMv7);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
- CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorInt32SubAndInt32MulP) {
- InstructionSelectorTester m;
- m.Return(
- m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
- m.SelectInstructions();
- CHECK_EQ(2, m.code.size());
- CHECK_EQ(kArmMul, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(kArmSub, m.code[1]->arch_opcode());
- CHECK_EQ(2, m.code[1]->InputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(1));
-}
-
-
-TEST(InstructionSelectorInt32SubAndInt32MulP_MLS) {
- InstructionSelectorTester m;
- m.Return(
- m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
- m.SelectInstructions(MLS);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmMls, m.code[0]->arch_opcode());
-}
-
-
-TEST(InstructionSelectorInt32DivP) {
- InstructionSelectorTester m;
- m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(4, m.code.size());
- CHECK_EQ(kArmVcvtF64S32, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(kArmVcvtF64S32, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
- CHECK_EQ(2, m.code[2]->InputCount());
- CHECK_EQ(1, m.code[2]->OutputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
- CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
- CHECK_EQ(kArmVcvtS32F64, m.code[3]->arch_opcode());
- CHECK_EQ(1, m.code[3]->InputCount());
- CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
-}
-
-
-TEST(InstructionSelectorInt32DivP_SUDIV) {
- InstructionSelectorTester m;
- m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions(SUDIV);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
-}
-
-
-TEST(InstructionSelectorInt32UDivP) {
- InstructionSelectorTester m;
- m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(4, m.code.size());
- CHECK_EQ(kArmVcvtF64U32, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(kArmVcvtF64U32, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
- CHECK_EQ(2, m.code[2]->InputCount());
- CHECK_EQ(1, m.code[2]->OutputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
- CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
- CHECK_EQ(kArmVcvtU32F64, m.code[3]->arch_opcode());
- CHECK_EQ(1, m.code[3]->InputCount());
- CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
-}
-
-
-TEST(InstructionSelectorInt32UDivP_SUDIV) {
- InstructionSelectorTester m;
- m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions(SUDIV);
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
-}
-
-
-TEST(InstructionSelectorInt32ModP) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(6, m.code.size());
- CHECK_EQ(kArmVcvtF64S32, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(kArmVcvtF64S32, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
- CHECK_EQ(2, m.code[2]->InputCount());
- CHECK_EQ(1, m.code[2]->OutputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
- CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
- CHECK_EQ(kArmVcvtS32F64, m.code[3]->arch_opcode());
- CHECK_EQ(1, m.code[3]->InputCount());
- CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
- CHECK_EQ(kArmMul, m.code[4]->arch_opcode());
- CHECK_EQ(1, m.code[4]->OutputCount());
- CHECK_EQ(2, m.code[4]->InputCount());
- CheckSameVreg(m.code[3]->Output(), m.code[4]->InputAt(0));
- CheckSameVreg(m.code[1]->InputAt(0), m.code[4]->InputAt(1));
- CHECK_EQ(kArmSub, m.code[5]->arch_opcode());
- CHECK_EQ(1, m.code[5]->OutputCount());
- CHECK_EQ(2, m.code[5]->InputCount());
- CheckSameVreg(m.code[0]->InputAt(0), m.code[5]->InputAt(0));
- CheckSameVreg(m.code[4]->Output(), m.code[5]->InputAt(1));
-}
-
-
-TEST(InstructionSelectorInt32ModP_SUDIV) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions(SUDIV);
- CHECK_EQ(3, m.code.size());
- CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(kArmMul, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(2, m.code[1]->InputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
- CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
- CHECK_EQ(kArmSub, m.code[2]->arch_opcode());
- CHECK_EQ(1, m.code[2]->OutputCount());
- CHECK_EQ(2, m.code[2]->InputCount());
- CheckSameVreg(m.code[0]->InputAt(0), m.code[2]->InputAt(0));
- CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
-}
-
-
-TEST(InstructionSelectorInt32ModP_MLS_SUDIV) {
- InstructionSelectorTester m;
- m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions(MLS, SUDIV);
- CHECK_EQ(2, m.code.size());
- CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(kArmMls, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(3, m.code[1]->InputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
- CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
- CheckSameVreg(m.code[0]->InputAt(0), m.code[1]->InputAt(2));
-}
-
-
-TEST(InstructionSelectorInt32UModP) {
- InstructionSelectorTester m;
- m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(6, m.code.size());
- CHECK_EQ(kArmVcvtF64U32, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(kArmVcvtF64U32, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
- CHECK_EQ(2, m.code[2]->InputCount());
- CHECK_EQ(1, m.code[2]->OutputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
- CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
- CHECK_EQ(kArmVcvtU32F64, m.code[3]->arch_opcode());
- CHECK_EQ(1, m.code[3]->InputCount());
- CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
- CHECK_EQ(kArmMul, m.code[4]->arch_opcode());
- CHECK_EQ(1, m.code[4]->OutputCount());
- CHECK_EQ(2, m.code[4]->InputCount());
- CheckSameVreg(m.code[3]->Output(), m.code[4]->InputAt(0));
- CheckSameVreg(m.code[1]->InputAt(0), m.code[4]->InputAt(1));
- CHECK_EQ(kArmSub, m.code[5]->arch_opcode());
- CHECK_EQ(1, m.code[5]->OutputCount());
- CHECK_EQ(2, m.code[5]->InputCount());
- CheckSameVreg(m.code[0]->InputAt(0), m.code[5]->InputAt(0));
- CheckSameVreg(m.code[4]->Output(), m.code[5]->InputAt(1));
-}
-
-
-TEST(InstructionSelectorInt32UModP_SUDIV) {
- InstructionSelectorTester m;
- m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions(SUDIV);
- CHECK_EQ(3, m.code.size());
- CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(kArmMul, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(2, m.code[1]->InputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
- CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
- CHECK_EQ(kArmSub, m.code[2]->arch_opcode());
- CHECK_EQ(1, m.code[2]->OutputCount());
- CHECK_EQ(2, m.code[2]->InputCount());
- CheckSameVreg(m.code[0]->InputAt(0), m.code[2]->InputAt(0));
- CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
-}
-
-
-TEST(InstructionSelectorInt32UModP_MLS_SUDIV) {
- InstructionSelectorTester m;
- m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions(MLS, SUDIV);
- CHECK_EQ(2, m.code.size());
- CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(kArmMls, m.code[1]->arch_opcode());
- CHECK_EQ(1, m.code[1]->OutputCount());
- CHECK_EQ(3, m.code[1]->InputCount());
- CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
- CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
- CheckSameVreg(m.code[0]->InputAt(0), m.code[1]->InputAt(2));
-}
-
-
-TEST(InstructionSelectorWord32EqualP) {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
-}
-
-
-TEST(InstructionSelectorWord32EqualImm) {
- Immediates immediates;
- for (Immediates::const_iterator i = immediates.begin(); i != immediates.end();
- ++i) {
- int32_t imm = *i;
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- if (imm == 0) {
- CHECK_EQ(kArmTst, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CheckSameVreg(m.code[0]->InputAt(0), m.code[0]->InputAt(1));
- } else {
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- }
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- if (imm == 0) {
- CHECK_EQ(kArmTst, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CheckSameVreg(m.code[0]->InputAt(0), m.code[0]->InputAt(1));
- } else {
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- }
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32EqualAndDPIP) {
- DPIs dpis;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)),
- m.Int32Constant(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- m.Return(
- m.Word32Equal(m.Int32Constant(0),
- m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32EqualAndDPIImm) {
- DPIs dpis;
- Immediates immediates;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- for (Immediates::const_iterator j = immediates.begin();
- j != immediates.end(); ++j) {
- int32_t imm = *j;
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(
- m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm)),
- m.Int32Constant(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(
- m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0)),
- m.Int32Constant(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(
- m.Int32Constant(0),
- m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(
- m.Int32Constant(0),
- m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorWord32EqualAndShiftP) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(
- m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2))));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Word32Equal(
- m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
-}
-
-
-TEST(InstructionSelectorBranchWithWord32EqualAndShiftP) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Parameter(0), m.NewNode(shift.op, m.Parameter(1),
- m.Parameter(2))),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(
- m.Word32Equal(m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
- m.Parameter(0)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
-}
-
-
-TEST(InstructionSelectorBranchWithWord32EqualAndShiftImm) {
- Shifts shifts;
- for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
- Shift shift = *i;
- for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(
- m.Word32Equal(m.Parameter(0), m.NewNode(shift.op, m.Parameter(1),
- m.Int32Constant(imm))),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(m.Word32Equal(
- m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)),
- m.Parameter(0)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
- }
-}
-
-
-TEST(InstructionSelectorBranchWithWord32EqualAndRotateRightP) {
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror =
- m.Word32Or(m.Word32Shr(value, shift),
- m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)));
- m.Branch(m.Word32Equal(input, ror), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror =
- m.Word32Or(m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)),
- m.Word32Shr(value, shift));
- m.Branch(m.Word32Equal(input, ror), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror =
- m.Word32Or(m.Word32Shr(value, shift),
- m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)));
- m.Branch(m.Word32Equal(ror, input), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* shift = m.Parameter(2);
- Node* ror =
- m.Word32Or(m.Word32Shl(value, m.Int32Sub(m.Int32Constant(32), shift)),
- m.Word32Shr(value, shift));
- m.Branch(m.Word32Equal(ror, input), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
-}
-
-
-TEST(InstructionSelectorBranchWithWord32EqualAndRotateRightImm) {
- FOR_INPUTS(uint32_t, ror, i) {
- uint32_t shift = *i;
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* ror = m.Word32Or(m.Word32Shr(value, m.Int32Constant(shift)),
- m.Word32Shl(value, m.Int32Constant(32 - shift)));
- m.Branch(m.Word32Equal(input, ror), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- CHECK_LE(3, m.code[0]->InputCount());
- CHECK_EQ(shift, m.ToInt32(m.code[0]->InputAt(2)));
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* ror = m.Word32Or(m.Word32Shl(value, m.Int32Constant(32 - shift)),
- m.Word32Shr(value, m.Int32Constant(shift)));
- m.Branch(m.Word32Equal(input, ror), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- CHECK_LE(3, m.code[0]->InputCount());
- CHECK_EQ(shift, m.ToInt32(m.code[0]->InputAt(2)));
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* ror = m.Word32Or(m.Word32Shr(value, m.Int32Constant(shift)),
- m.Word32Shl(value, m.Int32Constant(32 - shift)));
- m.Branch(m.Word32Equal(ror, input), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- CHECK_LE(3, m.code[0]->InputCount());
- CHECK_EQ(shift, m.ToInt32(m.code[0]->InputAt(2)));
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* input = m.Parameter(0);
- Node* value = m.Parameter(1);
- Node* ror = m.Word32Or(m.Word32Shl(value, m.Int32Constant(32 - shift)),
- m.Word32Shr(value, m.Int32Constant(shift)));
- m.Branch(m.Word32Equal(ror, input), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R_ROR_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- CHECK_LE(3, m.code[0]->InputCount());
- CHECK_EQ(shift, m.ToInt32(m.code[0]->InputAt(2)));
- }
- }
-}
-
-
-TEST(InstructionSelectorBranchWithDPIP) {
- DPIs dpis;
- for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
- DPI dpi = *i;
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)), &blocka,
- &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kNotEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Int32Constant(0),
- m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1))),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)),
- m.Int32Constant(0)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(1));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kEqual, m.code[0]->flags_condition());
- }
- }
-}
-
-
-TEST(InstructionSelectorBranchWithODPIP) {
- ODPIs odpis;
- for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
- ODPI odpi = *i;
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
- m.Branch(m.Projection(1, node), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
- m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
- m.Branch(m.Word32Equal(m.Int32Constant(0), m.Projection(1, node)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
- }
- }
-}
-
-
-TEST(InstructionSelectorBranchWithODPIImm) {
- ODPIs odpis;
- Immediates immediates;
- for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
- ODPI odpi = *i;
- for (Immediates::const_iterator j = immediates.begin();
- j != immediates.end(); ++j) {
- int32_t imm = *j;
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
- m.Branch(m.Projection(1, node), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_LE(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
- m.Branch(m.Projection(1, node), &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kOverflow, m.code[0]->flags_condition());
- CHECK_LE(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
- m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
- CHECK_LE(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- {
- InstructionSelectorTester m;
- MLabel blocka, blockb;
- Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
- m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
- &blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(0));
- m.Bind(&blockb);
- m.Return(m.Projection(0, node));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
- CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
- CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
- CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
- CHECK_LE(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- }
- }
-}
diff --git a/deps/v8/test/cctest/compiler/test-instruction-selector-ia32.cc b/deps/v8/test/cctest/compiler/test-instruction-selector-ia32.cc
deleted file mode 100644
index b6509584e0..0000000000
--- a/deps/v8/test/cctest/compiler/test-instruction-selector-ia32.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/cctest/compiler/instruction-selector-tester.h"
-#include "test/cctest/compiler/value-helper.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-TEST(InstructionSelectorInt32AddP) {
- InstructionSelectorTester m;
- m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kIA32Add, m.code[0]->arch_opcode());
-}
-
-
-TEST(InstructionSelectorInt32AddImm) {
- FOR_INT32_INPUTS(i) {
- int32_t imm = *i;
- {
- InstructionSelectorTester m;
- m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kIA32Add, m.code[0]->arch_opcode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- {
- InstructionSelectorTester m;
- m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kIA32Add, m.code[0]->arch_opcode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
- }
-}
-
-
-TEST(InstructionSelectorInt32SubP) {
- InstructionSelectorTester m;
- m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kIA32Sub, m.code[0]->arch_opcode());
- CHECK_EQ(1, m.code[0]->OutputCount());
-}
-
-
-TEST(InstructionSelectorInt32SubImm) {
- FOR_INT32_INPUTS(i) {
- int32_t imm = *i;
- InstructionSelectorTester m;
- m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
- m.SelectInstructions();
- CHECK_EQ(1, m.code.size());
- CHECK_EQ(kIA32Sub, m.code[0]->arch_opcode());
- CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- }
-}
diff --git a/deps/v8/test/cctest/compiler/test-instruction-selector.cc b/deps/v8/test/cctest/compiler/test-instruction-selector.cc
deleted file mode 100644
index e59406426e..0000000000
--- a/deps/v8/test/cctest/compiler/test-instruction-selector.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/cctest/compiler/instruction-selector-tester.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-#if V8_TURBOFAN_TARGET
-
-TEST(InstructionSelectionReturnZero) {
- InstructionSelectorTester m;
- m.Return(m.Int32Constant(0));
- m.SelectInstructions(InstructionSelectorTester::kInternalMode);
- CHECK_EQ(2, static_cast<int>(m.code.size()));
- CHECK_EQ(kArchNop, m.code[0]->opcode());
- CHECK_EQ(kArchRet, m.code[1]->opcode());
- CHECK_EQ(1, static_cast<int>(m.code[1]->InputCount()));
-}
-
-#endif // !V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index bc9f4c7723..a9feaac2c8 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -9,6 +9,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
@@ -32,7 +33,6 @@ class InstructionTester : public HandleAndZoneScope {
info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
linkage(&info),
common(zone()),
- machine(zone(), kMachineWord32),
code(NULL) {}
~InstructionTester() { delete code; }
@@ -59,19 +59,19 @@ class InstructionTester : public HandleAndZoneScope {
Node* Int32Constant(int32_t val) {
Node* node = graph.NewNode(common.Int32Constant(val));
- schedule.AddNode(schedule.entry(), node);
+ schedule.AddNode(schedule.start(), node);
return node;
}
Node* Float64Constant(double val) {
Node* node = graph.NewNode(common.Float64Constant(val));
- schedule.AddNode(schedule.entry(), node);
+ schedule.AddNode(schedule.start(), node);
return node;
}
Node* Parameter(int32_t which) {
Node* node = graph.NewNode(common.Parameter(which));
- schedule.AddNode(schedule.entry(), node);
+ schedule.AddNode(schedule.start(), node);
return node;
}
@@ -103,7 +103,7 @@ TEST(InstructionBasic) {
R.Int32Constant(i); // Add some nodes to the graph.
}
- BasicBlock* last = R.schedule.entry();
+ BasicBlock* last = R.schedule.start();
for (int i = 0; i < 5; i++) {
BasicBlock* block = R.schedule.NewBasicBlock();
R.schedule.AddGoto(last, block);
@@ -130,10 +130,10 @@ TEST(InstructionBasic) {
TEST(InstructionGetBasicBlock) {
InstructionTester R;
- BasicBlock* b0 = R.schedule.entry();
+ BasicBlock* b0 = R.schedule.start();
BasicBlock* b1 = R.schedule.NewBasicBlock();
BasicBlock* b2 = R.schedule.NewBasicBlock();
- BasicBlock* b3 = R.schedule.exit();
+ BasicBlock* b3 = R.schedule.end();
R.schedule.AddGoto(b0, b1);
R.schedule.AddGoto(b1, b2);
@@ -188,7 +188,7 @@ TEST(InstructionGetBasicBlock) {
TEST(InstructionIsGapAt) {
InstructionTester R;
- BasicBlock* b0 = R.schedule.entry();
+ BasicBlock* b0 = R.schedule.start();
R.schedule.AddReturn(b0, R.Int32Constant(1));
R.allocCode();
@@ -213,8 +213,8 @@ TEST(InstructionIsGapAt) {
TEST(InstructionIsGapAt2) {
InstructionTester R;
- BasicBlock* b0 = R.schedule.entry();
- BasicBlock* b1 = R.schedule.exit();
+ BasicBlock* b0 = R.schedule.start();
+ BasicBlock* b1 = R.schedule.end();
R.schedule.AddGoto(b0, b1);
R.schedule.AddReturn(b1, R.Int32Constant(1));
@@ -256,7 +256,7 @@ TEST(InstructionIsGapAt2) {
TEST(InstructionAddGapMove) {
InstructionTester R;
- BasicBlock* b0 = R.schedule.entry();
+ BasicBlock* b0 = R.schedule.start();
R.schedule.AddReturn(b0, R.Int32Constant(1));
R.allocCode();
@@ -324,9 +324,9 @@ TEST(InstructionOperands) {
new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
- for (size_t i = 0; i < ARRAY_SIZE(outputs); i++) {
- for (size_t j = 0; j < ARRAY_SIZE(inputs); j++) {
- for (size_t k = 0; k < ARRAY_SIZE(temps); k++) {
+ for (size_t i = 0; i < arraysize(outputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ for (size_t k = 0; k < arraysize(temps); k++) {
TestInstr* m =
TestInstr::New(&zone, 101, i, outputs, j, inputs, k, temps);
CHECK(i == m->OutputCount());
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 42a606d23c..eb0975ef45 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -17,10 +17,16 @@ using namespace v8::internal::compiler;
class JSCacheTesterHelper {
protected:
explicit JSCacheTesterHelper(Zone* zone)
- : main_graph_(zone), main_common_(zone), main_typer_(zone) {}
+ : main_graph_(zone),
+ main_common_(zone),
+ main_javascript_(zone),
+ main_typer_(zone),
+ main_machine_() {}
Graph main_graph_;
CommonOperatorBuilder main_common_;
+ JSOperatorBuilder main_javascript_;
Typer main_typer_;
+ MachineOperatorBuilder main_machine_;
};
@@ -30,13 +36,14 @@ class JSConstantCacheTester : public HandleAndZoneScope,
public:
JSConstantCacheTester()
: JSCacheTesterHelper(main_zone()),
- JSGraph(&main_graph_, &main_common_, &main_typer_) {}
+ JSGraph(&main_graph_, &main_common_, &main_javascript_, &main_typer_,
+ &main_machine_) {}
Type* upper(Node* node) { return NodeProperties::GetBounds(node).upper; }
Handle<Object> handle(Node* node) {
CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
- return ValueOf<Handle<Object> >(node->op());
+ return OpParameter<Unique<Object> >(node).handle();
}
Factory* factory() { return main_isolate()->factory(); }
@@ -87,8 +94,8 @@ TEST(MinusZeroConstant) {
CHECK(!t->Is(Type::SignedSmall()));
CHECK(!t->Is(Type::UnsignedSmall()));
- double zero_value = ValueOf<double>(zero->op());
- double minus_zero_value = ValueOf<double>(minus_zero->op());
+ double zero_value = OpParameter<double>(zero);
+ double minus_zero_value = OpParameter<double>(minus_zero);
CHECK_EQ(0.0, zero_value);
CHECK_NE(-0.0, zero_value);
@@ -194,8 +201,8 @@ TEST(NoAliasing) {
T.OneConstant(), T.NaNConstant(), T.Constant(21),
T.Constant(22.2)};
- for (size_t i = 0; i < ARRAY_SIZE(nodes); i++) {
- for (size_t j = 0; j < ARRAY_SIZE(nodes); j++) {
+ for (size_t i = 0; i < arraysize(nodes); i++) {
+ for (size_t j = 0; j < arraysize(nodes); j++) {
if (i != j) CHECK_NE(nodes[i], nodes[j]);
}
}
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 740d9f3d49..47c660ae0d 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -6,7 +6,6 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/simplified-node-factory.h"
#include "src/compiler/source-position.h"
#include "src/compiler/typer.h"
#include "test/cctest/cctest.h"
@@ -16,18 +15,17 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
-class ContextSpecializationTester
- : public HandleAndZoneScope,
- public DirectGraphBuilder,
- public SimplifiedNodeFactory<ContextSpecializationTester> {
+class ContextSpecializationTester : public HandleAndZoneScope,
+ public DirectGraphBuilder {
public:
ContextSpecializationTester()
: DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
common_(main_zone()),
javascript_(main_zone()),
+ machine_(),
simplified_(main_zone()),
typer_(main_zone()),
- jsgraph_(graph(), common(), &typer_),
+ jsgraph_(graph(), common(), &javascript_, &typer_, &machine_),
info_(main_isolate(), main_zone()) {}
Factory* factory() { return main_isolate()->factory(); }
@@ -40,6 +38,7 @@ class ContextSpecializationTester
private:
CommonOperatorBuilder common_;
JSOperatorBuilder javascript_;
+ MachineOperatorBuilder machine_;
SimplifiedOperatorBuilder simplified_;
Typer typer_;
JSGraph jsgraph_;
@@ -93,10 +92,9 @@ TEST(ReduceJSLoadContext) {
CHECK(r.Changed());
Node* new_context_input = NodeProperties::GetValueInput(r.replacement(), 0);
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
- ValueMatcher<Handle<Context> > match(new_context_input);
- CHECK_EQ(*native, *match.Value());
- ContextAccess access = static_cast<Operator1<ContextAccess>*>(
- r.replacement()->op())->parameter();
+ HeapObjectMatcher<Context> match(new_context_input);
+ CHECK_EQ(*native, *match.Value().handle());
+ ContextAccess access = OpParameter<ContextAccess>(r.replacement());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, access.index());
CHECK_EQ(0, access.depth());
CHECK_EQ(false, access.immutable());
@@ -110,9 +108,9 @@ TEST(ReduceJSLoadContext) {
CHECK(r.Changed());
CHECK(r.replacement() != load);
- ValueMatcher<Handle<Object> > match(r.replacement());
+ HeapObjectMatcher<Object> match(r.replacement());
CHECK(match.HasValue());
- CHECK_EQ(*expected, *match.Value());
+ CHECK_EQ(*expected, *match.Value().handle());
}
// TODO(titzer): test with other kinds of contexts, e.g. a function context.
@@ -174,10 +172,9 @@ TEST(ReduceJSStoreContext) {
CHECK(r.Changed());
Node* new_context_input = NodeProperties::GetValueInput(r.replacement(), 0);
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
- ValueMatcher<Handle<Context> > match(new_context_input);
- CHECK_EQ(*native, *match.Value());
- ContextAccess access = static_cast<Operator1<ContextAccess>*>(
- r.replacement()->op())->parameter();
+ HeapObjectMatcher<Context> match(new_context_input);
+ CHECK_EQ(*native, *match.Value().handle());
+ ContextAccess access = OpParameter<ContextAccess>(r.replacement());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, access.index());
CHECK_EQ(0, access.depth());
CHECK_EQ(false, access.immutable());
@@ -216,11 +213,12 @@ TEST(SpecializeToContext) {
const_context, const_context, effect_in);
- Node* value_use = t.ChangeTaggedToInt32(load);
+ Node* value_use = t.NewNode(t.simplified()->ChangeTaggedToInt32(), load);
Node* other_load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
param_context, param_context, load);
Node* effect_use = other_load;
- Node* other_use = t.ChangeTaggedToInt32(other_load);
+ Node* other_use =
+ t.NewNode(t.simplified()->ChangeTaggedToInt32(), other_load);
Node* add = t.NewNode(t.javascript()->Add(), value_use, other_use,
param_context, other_load, start);
@@ -244,9 +242,9 @@ TEST(SpecializeToContext) {
CHECK_EQ(other_load, other_use->InputAt(0));
Node* replacement = value_use->InputAt(0);
- ValueMatcher<Handle<Object> > match(replacement);
+ HeapObjectMatcher<Object> match(replacement);
CHECK(match.HasValue());
- CHECK_EQ(*expected, *match.Value());
+ CHECK_EQ(*expected, *match.Value().handle());
}
// TODO(titzer): clean up above test and test more complicated effects.
}
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index b6aa6d9582..cf126c2c54 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -21,12 +21,10 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
binop(NULL),
unop(NULL),
javascript(main_zone()),
- machine(main_zone()),
simplified(main_zone()),
common(main_zone()),
graph(main_zone()),
typer(main_zone()),
- source_positions(&graph),
context_node(NULL) {
typer.DecorateGraph(&graph);
Node* s = graph.NewNode(common.Start(num_parameters));
@@ -34,15 +32,14 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Isolate* isolate;
- Operator* binop;
- Operator* unop;
+ const Operator* binop;
+ const Operator* unop;
JSOperatorBuilder javascript;
MachineOperatorBuilder machine;
SimplifiedOperatorBuilder simplified;
CommonOperatorBuilder common;
Graph graph;
Typer typer;
- SourcePositionTable source_positions;
Node* context_node;
Node* Parameter(Type* t, int32_t index = 0) {
@@ -51,9 +48,32 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
return n;
}
+ Node* UndefinedConstant() {
+ Unique<Object> unique =
+ Unique<Object>::CreateImmovable(isolate->factory()->undefined_value());
+ return graph.NewNode(common.HeapConstant(unique));
+ }
+
+ Node* HeapConstant(Handle<Object> constant) {
+ Unique<Object> unique = Unique<Object>::CreateUninitialized(constant);
+ return graph.NewNode(common.HeapConstant(unique));
+ }
+
+ Node* EmptyFrameState(Node* context) {
+ Node* parameters = graph.NewNode(common.StateValues(0));
+ Node* locals = graph.NewNode(common.StateValues(0));
+ Node* stack = graph.NewNode(common.StateValues(0));
+
+ Node* state_node =
+ graph.NewNode(common.FrameState(JS_FRAME, BailoutId(0), kIgnoreOutput),
+ parameters, locals, stack, context, UndefinedConstant());
+
+ return state_node;
+ }
+
Node* reduce(Node* node) {
- JSGraph jsgraph(&graph, &common, &typer);
- JSTypedLowering reducer(&jsgraph, &source_positions);
+ JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
+ JSTypedLowering reducer(&jsgraph);
Reduction reduction = reducer.Reduce(node);
if (reduction.Changed()) return reduction.replacement();
return node;
@@ -75,25 +95,25 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
CHECK_EQ(2, node->InputCount()); // should not have context, effect, etc.
}
- void CheckPureBinop(Operator* expected, Node* node) {
+ void CheckPureBinop(const Operator* expected, Node* node) {
CHECK_EQ(expected->opcode(), node->op()->opcode());
CHECK_EQ(2, node->InputCount()); // should not have context, effect, etc.
}
- Node* ReduceUnop(Operator* op, Type* input_type) {
+ Node* ReduceUnop(const Operator* op, Type* input_type) {
return reduce(Unop(op, Parameter(input_type)));
}
- Node* ReduceBinop(Operator* op, Type* left_type, Type* right_type) {
+ Node* ReduceBinop(const Operator* op, Type* left_type, Type* right_type) {
return reduce(Binop(op, Parameter(left_type, 0), Parameter(right_type, 1)));
}
- Node* Binop(Operator* op, Node* left, Node* right) {
+ Node* Binop(const Operator* op, Node* left, Node* right) {
// JS binops also require context, effect, and control
return graph.NewNode(op, left, right, context(), start(), control());
}
- Node* Unop(Operator* op, Node* input) {
+ Node* Unop(const Operator* op, Node* input) {
// JS unops also require context, effect, and control
return graph.NewNode(op, input, context(), start(), control());
}
@@ -110,17 +130,17 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
void CheckInt32Constant(int32_t expected, Node* result) {
CHECK_EQ(IrOpcode::kInt32Constant, result->opcode());
- CHECK_EQ(expected, ValueOf<int32_t>(result->op()));
+ CHECK_EQ(expected, OpParameter<int32_t>(result));
}
void CheckNumberConstant(double expected, Node* result) {
CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
- CHECK_EQ(expected, ValueOf<double>(result->op()));
+ CHECK_EQ(expected, OpParameter<double>(result));
}
void CheckNaN(Node* result) {
CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
- double value = ValueOf<double>(result->op());
+ double value = OpParameter<double>(result);
CHECK(std::isnan(value));
}
@@ -134,7 +154,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
void CheckHandle(Handle<Object> expected, Node* result) {
CHECK_EQ(IrOpcode::kHeapConstant, result->opcode());
- Handle<Object> value = ValueOf<Handle<Object> >(result->op());
+ Handle<Object> value = OpParameter<Unique<Object> >(result).handle();
CHECK_EQ(*expected, *value);
}
};
@@ -154,7 +174,7 @@ static Type* kNumberTypes[] = {
Type::OtherUnsigned32(), Type::OtherSigned32(), Type::SignedSmall(),
Type::Signed32(), Type::Unsigned32(), Type::Integral32(),
Type::MinusZero(), Type::NaN(), Type::OtherNumber(),
- Type::Number()};
+ Type::OrderedNumber(), Type::Number()};
static Type* kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
@@ -171,13 +191,15 @@ static IrOpcode::Value NumberToI32(bool is_signed) {
}
+// TODO(turbofan): Lowering of StringAdd is disabled for now.
+#if 0
TEST(StringBinops) {
JSTypedLoweringTester R;
- for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kStringTypes); ++i) {
Node* p0 = R.Parameter(kStringTypes[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(kStringTypes); ++j) {
+ for (size_t j = 0; j < arraysize(kStringTypes); ++j) {
Node* p1 = R.Parameter(kStringTypes[j], 1);
Node* add = R.Binop(R.javascript.Add(), p0, p1);
@@ -189,11 +211,12 @@ TEST(StringBinops) {
}
}
}
+#endif
TEST(AddNumber1) {
JSTypedLoweringTester R;
- for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
Node* p0 = R.Parameter(kNumberTypes[i], 0);
Node* p1 = R.Parameter(kNumberTypes[i], 1);
Node* add = R.Binop(R.javascript.Add(), p0, p1);
@@ -208,7 +231,7 @@ TEST(AddNumber1) {
TEST(NumberBinops) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.Add(), R.simplified.NumberAdd(),
R.javascript.Subtract(), R.simplified.NumberSubtract(),
R.javascript.Multiply(), R.simplified.NumberMultiply(),
@@ -216,13 +239,13 @@ TEST(NumberBinops) {
R.javascript.Modulus(), R.simplified.NumberModulus(),
};
- for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
Node* p0 = R.Parameter(kNumberTypes[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(kNumberTypes); ++j) {
+ for (size_t j = 0; j < arraysize(kNumberTypes); ++j) {
Node* p1 = R.Parameter(kNumberTypes[j], 1);
- for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+ for (size_t k = 0; k < arraysize(ops); k += 2) {
Node* add = R.Binop(ops[k], p0, p1);
Node* r = R.reduce(add);
@@ -242,7 +265,7 @@ static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
CHECK_EQ(old_input, new_input);
} else if (new_input->opcode() == IrOpcode::kNumberConstant) {
CHECK(NodeProperties::GetBounds(new_input).upper->Is(expected_type));
- double v = ValueOf<double>(new_input->op());
+ double v = OpParameter<double>(new_input);
double e = static_cast<double>(is_signed ? FastD2I(v) : FastD2UI(v));
CHECK_EQ(e, v);
} else {
@@ -255,7 +278,7 @@ static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
public:
static const int kNumberOps = 6;
- Operator* ops[kNumberOps];
+ const Operator* ops[kNumberOps];
bool signedness[kNumberOps];
JSBitwiseShiftTypedLoweringTester() {
@@ -269,7 +292,7 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
}
private:
- void set(int idx, Operator* op, bool s) {
+ void set(int idx, const Operator* op, bool s) {
ops[idx] = op;
signedness[idx] = s;
}
@@ -286,10 +309,10 @@ TEST(Int32BitwiseShifts) {
Type::Null(), Type::Boolean(), Type::Number(),
Type::String(), Type::Object()};
- for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
+ for (size_t i = 0; i < arraysize(types); ++i) {
Node* p0 = R.Parameter(types[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(types); ++j) {
+ for (size_t j = 0; j < arraysize(types); ++j) {
Node* p1 = R.Parameter(types[j], 1);
for (int k = 0; k < R.kNumberOps; k += 2) {
@@ -315,7 +338,7 @@ TEST(Int32BitwiseShifts) {
class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
public:
static const int kNumberOps = 6;
- Operator* ops[kNumberOps];
+ const Operator* ops[kNumberOps];
bool signedness[kNumberOps];
JSBitwiseTypedLoweringTester() {
@@ -329,7 +352,7 @@ class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
}
private:
- void set(int idx, Operator* op, bool s) {
+ void set(int idx, const Operator* op, bool s) {
ops[idx] = op;
signedness[idx] = s;
}
@@ -346,10 +369,10 @@ TEST(Int32BitwiseBinops) {
Type::Null(), Type::Boolean(), Type::Number(),
Type::String(), Type::Object()};
- for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
+ for (size_t i = 0; i < arraysize(types); ++i) {
Node* p0 = R.Parameter(types[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(types); ++j) {
+ for (size_t j = 0; j < arraysize(types); ++j) {
Node* p1 = R.Parameter(types[j], 1);
for (int k = 0; k < R.kNumberOps; k += 2) {
@@ -368,9 +391,9 @@ TEST(Int32BitwiseBinops) {
TEST(JSToNumber1) {
JSTypedLoweringTester R;
- Operator* ton = R.javascript.ToNumber();
+ const Operator* ton = R.javascript.ToNumber();
- for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); i++) { // ToNumber(number)
+ for (size_t i = 0; i < arraysize(kNumberTypes); i++) { // ToNumber(number)
Node* r = R.ReduceUnop(ton, kNumberTypes[i]);
CHECK_EQ(IrOpcode::kParameter, r->opcode());
}
@@ -392,7 +415,7 @@ TEST(JSToNumber_replacement) {
Type* types[] = {Type::Null(), Type::Undefined(), Type::Number()};
- for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+ for (size_t i = 0; i < arraysize(types); i++) {
Node* n = R.Parameter(types[i]);
Node* c = R.graph.NewNode(R.javascript.ToNumber(), n, R.context(),
R.start(), R.start());
@@ -418,11 +441,12 @@ TEST(JSToNumber_replacement) {
TEST(JSToNumberOfConstant) {
JSTypedLoweringTester R;
- Operator* ops[] = {R.common.NumberConstant(0), R.common.NumberConstant(-1),
- R.common.NumberConstant(0.1), R.common.Int32Constant(1177),
- R.common.Float64Constant(0.99)};
+ const Operator* ops[] = {
+ R.common.NumberConstant(0), R.common.NumberConstant(-1),
+ R.common.NumberConstant(0.1), R.common.Int32Constant(1177),
+ R.common.Float64Constant(0.99)};
- for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+ for (size_t i = 0; i < arraysize(ops); i++) {
Node* n = R.graph.NewNode(ops[i]);
Node* convert = R.Unop(R.javascript.ToNumber(), n);
Node* r = R.reduce(convert);
@@ -447,7 +471,7 @@ TEST(JSToNumberOfNumberOrOtherPrimitive) {
Type* others[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
Type::String()};
- for (size_t i = 0; i < ARRAY_SIZE(others); i++) {
+ for (size_t i = 0; i < arraysize(others); i++) {
Type* t = Type::Union(Type::Number(), others[i], R.main_zone());
Node* r = R.ReduceUnop(R.javascript.ToNumber(), t);
CHECK_EQ(IrOpcode::kJSToNumber, r->opcode());
@@ -457,7 +481,7 @@ TEST(JSToNumberOfNumberOrOtherPrimitive) {
TEST(JSToBoolean) {
JSTypedLoweringTester R;
- Operator* op = R.javascript.ToBoolean();
+ const Operator* op = R.javascript.ToBoolean();
{ // ToBoolean(undefined)
Node* r = R.ReduceUnop(op, Type::Undefined());
@@ -474,12 +498,12 @@ TEST(JSToBoolean) {
CHECK_EQ(IrOpcode::kParameter, r->opcode());
}
- { // ToBoolean(number)
- Node* r = R.ReduceUnop(op, Type::Number());
+ { // ToBoolean(ordered-number)
+ Node* r = R.ReduceUnop(op, Type::OrderedNumber());
CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
Node* i = r->InputAt(0);
CHECK_EQ(IrOpcode::kNumberEqual, i->opcode());
- // ToBoolean(number) => BooleanNot(NumberEqual(x, #0))
+ // ToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
}
{ // ToBoolean(string)
@@ -508,10 +532,11 @@ TEST(JSToBoolean) {
TEST(JSToBoolean_replacement) {
JSTypedLoweringTester R;
- Type* types[] = {Type::Null(), Type::Undefined(), Type::Boolean(),
+ Type* types[] = {Type::Null(), Type::Undefined(),
+ Type::Boolean(), Type::OrderedNumber(),
Type::DetectableObject(), Type::Undetectable()};
- for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+ for (size_t i = 0; i < arraysize(types); i++) {
Node* n = R.Parameter(types[i]);
Node* c = R.graph.NewNode(R.javascript.ToBoolean(), n, R.context(),
R.start(), R.start());
@@ -523,6 +548,8 @@ TEST(JSToBoolean_replacement) {
if (types[i]->Is(Type::Boolean())) {
CHECK_EQ(n, r);
+ } else if (types[i]->Is(Type::OrderedNumber())) {
+ CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
} else {
CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
}
@@ -537,12 +564,12 @@ TEST(JSToBoolean_replacement) {
TEST(JSToString1) {
JSTypedLoweringTester R;
- for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); i++) {
+ for (size_t i = 0; i < arraysize(kStringTypes); i++) {
Node* r = R.ReduceUnop(R.javascript.ToString(), kStringTypes[i]);
CHECK_EQ(IrOpcode::kParameter, r->opcode());
}
- Operator* op = R.javascript.ToString();
+ const Operator* op = R.javascript.ToString();
{ // ToString(undefined) => "undefined"
Node* r = R.ReduceUnop(op, Type::Undefined());
@@ -583,7 +610,7 @@ TEST(JSToString_replacement) {
Type* types[] = {Type::Null(), Type::Undefined(), Type::String()};
- for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+ for (size_t i = 0; i < arraysize(types); i++) {
Node* n = R.Parameter(types[i]);
Node* c = R.graph.NewNode(R.javascript.ToString(), n, R.context(),
R.start(), R.start());
@@ -609,18 +636,18 @@ TEST(JSToString_replacement) {
TEST(StringComparison) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.LessThan(), R.simplified.StringLessThan(),
R.javascript.LessThanOrEqual(), R.simplified.StringLessThanOrEqual(),
R.javascript.GreaterThan(), R.simplified.StringLessThan(),
R.javascript.GreaterThanOrEqual(), R.simplified.StringLessThanOrEqual()};
- for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); i++) {
+ for (size_t i = 0; i < arraysize(kStringTypes); i++) {
Node* p0 = R.Parameter(kStringTypes[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(kStringTypes); j++) {
+ for (size_t j = 0; j < arraysize(kStringTypes); j++) {
Node* p1 = R.Parameter(kStringTypes[j], 1);
- for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+ for (size_t k = 0; k < arraysize(ops); k += 2) {
Node* cmp = R.Binop(ops[k], p0, p1);
Node* r = R.reduce(cmp);
@@ -643,6 +670,9 @@ TEST(StringComparison) {
static void CheckIsConvertedToNumber(Node* val, Node* converted) {
if (NodeProperties::GetBounds(val).upper->Is(Type::Number())) {
CHECK_EQ(val, converted);
+ } else if (NodeProperties::GetBounds(val).upper->Is(Type::Boolean())) {
+ CHECK_EQ(IrOpcode::kBooleanToNumber, converted->opcode());
+ CHECK_EQ(val, converted->InputAt(0));
} else {
if (converted->opcode() == IrOpcode::kNumberConstant) return;
CHECK_EQ(IrOpcode::kJSToNumber, converted->opcode());
@@ -654,23 +684,25 @@ static void CheckIsConvertedToNumber(Node* val, Node* converted) {
TEST(NumberComparison) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.LessThan(), R.simplified.NumberLessThan(),
R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual()};
- for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+ for (size_t i = 0; i < arraysize(kJSTypes); i++) {
Type* t0 = kJSTypes[i];
- if (t0->Is(Type::String())) continue; // skip Type::String
+ // Skip Type::String and Type::Receiver which might coerce into a string.
+ if (t0->Is(Type::String()) || t0->Is(Type::Receiver())) continue;
Node* p0 = R.Parameter(t0, 0);
- for (size_t j = 0; j < ARRAY_SIZE(kJSTypes); j++) {
+ for (size_t j = 0; j < arraysize(kJSTypes); j++) {
Type* t1 = kJSTypes[j];
- if (t1->Is(Type::String())) continue; // skip Type::String
+ // Skip Type::String and Type::Receiver which might coerce into a string.
+ if (t1->Is(Type::String()) || t0->Is(Type::Receiver())) continue;
Node* p1 = R.Parameter(t1, 1);
- for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+ for (size_t k = 0; k < arraysize(ops); k += 2) {
Node* cmp = R.Binop(ops[k], p0, p1);
Node* r = R.reduce(cmp);
@@ -696,10 +728,10 @@ TEST(MixedComparison1) {
Type* types[] = {Type::Number(), Type::String(),
Type::Union(Type::Number(), Type::String(), R.main_zone())};
- for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+ for (size_t i = 0; i < arraysize(types); i++) {
Node* p0 = R.Parameter(types[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(types); j++) {
+ for (size_t j = 0; j < arraysize(types); j++) {
Node* p1 = R.Parameter(types[j], 1);
{
Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
@@ -724,7 +756,7 @@ TEST(MixedComparison1) {
TEST(ObjectComparison) {
JSTypedLoweringTester R;
- Node* p0 = R.Parameter(Type::Object(), 0);
+ Node* p0 = R.Parameter(Type::Number(), 0);
Node* p1 = R.Parameter(Type::Object(), 1);
Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
@@ -740,37 +772,48 @@ TEST(ObjectComparison) {
Node* i0 = r->InputAt(0);
Node* i1 = r->InputAt(1);
- CHECK_NE(p0, i0);
+ CHECK_EQ(p0, i0);
CHECK_NE(p1, i1);
- CHECK_EQ(IrOpcode::kJSToNumber, i0->opcode());
+ CHECK_EQ(IrOpcode::kParameter, i0->opcode());
CHECK_EQ(IrOpcode::kJSToNumber, i1->opcode());
// Check effect chain is correct.
- R.CheckEffectInput(R.start(), i0);
- R.CheckEffectInput(i0, i1);
+ R.CheckEffectInput(R.start(), i1);
R.CheckEffectInput(i1, effect_use);
}
TEST(UnaryNot) {
JSTypedLoweringTester R;
- Operator* opnot = R.javascript.UnaryNot();
+ const Operator* opnot = R.javascript.UnaryNot();
- for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
- Node* r = R.ReduceUnop(opnot, kJSTypes[i]);
+ for (size_t i = 0; i < arraysize(kJSTypes); i++) {
+ Node* orig = R.Unop(opnot, R.Parameter(kJSTypes[i]));
+ Node* use = R.graph.NewNode(R.common.Return(), orig);
+ Node* r = R.reduce(orig);
// TODO(titzer): test will break if/when js-typed-lowering constant folds.
- CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+ CHECK_EQ(IrOpcode::kBooleanNot, use->InputAt(0)->opcode());
+
+ if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
+ // The original node was turned into a ToBoolean.
+ CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+ } else {
+ CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+ }
}
}
TEST(RemoveToNumberEffects) {
+ FLAG_turbo_deoptimization = true;
+
JSTypedLoweringTester R;
Node* effect_use = NULL;
for (int i = 0; i < 10; i++) {
Node* p0 = R.Parameter(Type::Number());
Node* ton = R.Unop(R.javascript.ToNumber(), p0);
+ Node* frame_state = R.EmptyFrameState(R.context());
effect_use = NULL;
switch (i) {
@@ -786,11 +829,11 @@ TEST(RemoveToNumberEffects) {
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
case 3:
effect_use = R.graph.NewNode(R.javascript.Add(), ton, ton, R.context(),
- ton, R.start());
+ frame_state, ton, R.start());
break;
case 4:
effect_use = R.graph.NewNode(R.javascript.Add(), p0, p0, R.context(),
- ton, R.start());
+ frame_state, ton, R.start());
break;
case 5:
effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
@@ -822,7 +865,7 @@ TEST(RemoveToNumberEffects) {
// Helper class for testing the reduction of a single binop.
class BinopEffectsTester {
public:
- explicit BinopEffectsTester(Operator* op, Type* t0, Type* t1)
+ explicit BinopEffectsTester(const Operator* op, Type* t0, Type* t1)
: R(),
p0(R.Parameter(t0, 0)),
p1(R.Parameter(t1, 1)),
@@ -910,10 +953,10 @@ TEST(EqualityForNumbers) {
Type::Number()};
- for (size_t i = 0; i < ARRAY_SIZE(simple_number_types); ++i) {
+ for (size_t i = 0; i < arraysize(simple_number_types); ++i) {
Node* p0 = R.Parameter(simple_number_types[i], 0);
- for (size_t j = 0; j < ARRAY_SIZE(simple_number_types); ++j) {
+ for (size_t j = 0; j < arraysize(simple_number_types); ++j) {
Node* p1 = R.Parameter(simple_number_types[j], 1);
CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kNumberEqual);
@@ -930,7 +973,7 @@ TEST(StrictEqualityForRefEqualTypes) {
Type::Object(), Type::Receiver()};
Node* p0 = R.Parameter(Type::Any());
- for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+ for (size_t i = 0; i < arraysize(types); i++) {
Node* p1 = R.Parameter(types[i]);
CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kReferenceEqual);
}
@@ -951,7 +994,7 @@ TEST(StringEquality) {
TEST(RemovePureNumberBinopEffects) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.Equal(), R.simplified.NumberEqual(),
R.javascript.Add(), R.simplified.NumberAdd(),
R.javascript.Subtract(), R.simplified.NumberSubtract(),
@@ -962,7 +1005,7 @@ TEST(RemovePureNumberBinopEffects) {
R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
};
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
BinopEffectsTester B(ops[j], Type::Number(), Type::Number());
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
@@ -979,14 +1022,14 @@ TEST(RemovePureNumberBinopEffects) {
TEST(OrderNumberBinopEffects1) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.Subtract(), R.simplified.NumberSubtract(),
R.javascript.Multiply(), R.simplified.NumberMultiply(),
R.javascript.Divide(), R.simplified.NumberDivide(),
R.javascript.Modulus(), R.simplified.NumberModulus(),
};
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
BinopEffectsTester B(ops[j], Type::Object(), Type::String());
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
@@ -1005,7 +1048,7 @@ TEST(OrderNumberBinopEffects1) {
TEST(OrderNumberBinopEffects2) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.Add(), R.simplified.NumberAdd(),
R.javascript.Subtract(), R.simplified.NumberSubtract(),
R.javascript.Multiply(), R.simplified.NumberMultiply(),
@@ -1013,8 +1056,8 @@ TEST(OrderNumberBinopEffects2) {
R.javascript.Modulus(), R.simplified.NumberModulus(),
};
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Number(), Type::Object());
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
+ BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol());
Node* i0 = B.CheckNoOp(0);
Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
@@ -1026,8 +1069,8 @@ TEST(OrderNumberBinopEffects2) {
B.CheckEffectOrdering(i1);
}
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Object(), Type::Number());
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number());
Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
Node* i1 = B.CheckNoOp(1);
@@ -1044,13 +1087,13 @@ TEST(OrderNumberBinopEffects2) {
TEST(OrderCompareEffects) {
JSTypedLoweringTester R;
- Operator* ops[] = {
+ const Operator* ops[] = {
R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
};
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Object(), Type::String());
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::String());
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
@@ -1064,8 +1107,8 @@ TEST(OrderCompareEffects) {
B.CheckEffectOrdering(i1, i0);
}
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Number(), Type::Object());
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
+ BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol());
Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
Node* i1 = B.result->InputAt(1);
@@ -1077,8 +1120,8 @@ TEST(OrderCompareEffects) {
B.CheckEffectOrdering(i0);
}
- for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Object(), Type::Number());
+ for (size_t j = 0; j < arraysize(ops); j += 2) {
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number());
Node* i0 = B.result->InputAt(0);
Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
@@ -1177,25 +1220,25 @@ TEST(Int32BinopEffects) {
TEST(UnaryNotEffects) {
JSTypedLoweringTester R;
- Operator* opnot = R.javascript.UnaryNot();
+ const Operator* opnot = R.javascript.UnaryNot();
- for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+ for (size_t i = 0; i < arraysize(kJSTypes); i++) {
Node* p0 = R.Parameter(kJSTypes[i], 0);
Node* orig = R.Unop(opnot, p0);
Node* effect_use = R.UseForEffect(orig);
Node* value_use = R.graph.NewNode(R.common.Return(), orig);
Node* r = R.reduce(orig);
// TODO(titzer): test will break if/when js-typed-lowering constant folds.
- CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
-
- CHECK_EQ(r, value_use->InputAt(0));
+ CHECK_EQ(IrOpcode::kBooleanNot, value_use->InputAt(0)->opcode());
- if (r->InputAt(0) == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
+ if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
// The original node was turned into a ToBoolean, which has an effect.
+ CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
R.CheckEffectInput(R.start(), orig);
R.CheckEffectInput(orig, effect_use);
} else {
// effect should have been removed from this node.
+ CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
R.CheckEffectInput(R.start(), effect_use);
}
}
@@ -1207,9 +1250,9 @@ TEST(Int32AddNarrowing) {
JSBitwiseTypedLoweringTester R;
for (int o = 0; o < R.kNumberOps; o += 2) {
- for (size_t i = 0; i < ARRAY_SIZE(kInt32Types); i++) {
+ for (size_t i = 0; i < arraysize(kInt32Types); i++) {
Node* n0 = R.Parameter(kInt32Types[i]);
- for (size_t j = 0; j < ARRAY_SIZE(kInt32Types); j++) {
+ for (size_t j = 0; j < arraysize(kInt32Types); j++) {
Node* n1 = R.Parameter(kInt32Types[j]);
Node* one = R.graph.NewNode(R.common.NumberConstant(1));
@@ -1234,9 +1277,9 @@ TEST(Int32AddNarrowing) {
JSBitwiseShiftTypedLoweringTester R;
for (int o = 0; o < R.kNumberOps; o += 2) {
- for (size_t i = 0; i < ARRAY_SIZE(kInt32Types); i++) {
+ for (size_t i = 0; i < arraysize(kInt32Types); i++) {
Node* n0 = R.Parameter(kInt32Types[i]);
- for (size_t j = 0; j < ARRAY_SIZE(kInt32Types); j++) {
+ for (size_t j = 0; j < arraysize(kInt32Types); j++) {
Node* n1 = R.Parameter(kInt32Types[j]);
Node* one = R.graph.NewNode(R.common.NumberConstant(1));
@@ -1289,10 +1332,10 @@ TEST(Int32Comparisons) {
JSTypedLoweringTester R;
struct Entry {
- Operator* js_op;
- Operator* uint_op;
- Operator* int_op;
- Operator* num_op;
+ const Operator* js_op;
+ const Operator* uint_op;
+ const Operator* int_op;
+ const Operator* num_op;
bool commute;
};
@@ -1308,19 +1351,19 @@ TEST(Int32Comparisons) {
R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
true}};
- for (size_t o = 0; o < ARRAY_SIZE(ops); o++) {
- for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); i++) {
+ for (size_t o = 0; o < arraysize(ops); o++) {
+ for (size_t i = 0; i < arraysize(kNumberTypes); i++) {
Type* t0 = kNumberTypes[i];
Node* p0 = R.Parameter(t0, 0);
- for (size_t j = 0; j < ARRAY_SIZE(kNumberTypes); j++) {
+ for (size_t j = 0; j < arraysize(kNumberTypes); j++) {
Type* t1 = kNumberTypes[j];
Node* p1 = R.Parameter(t1, 1);
Node* cmp = R.Binop(ops[o].js_op, p0, p1);
Node* r = R.reduce(cmp);
- Operator* expected;
+ const Operator* expected;
if (t0->Is(Type::Unsigned32()) && t1->Is(Type::Unsigned32())) {
expected = ops[o].uint_op;
} else if (t0->Is(Type::Signed32()) && t1->Is(Type::Signed32())) {
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 6d9453f7c4..ff65d6e4d6 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -65,7 +65,7 @@ TEST(TestLinkageJSFunctionIncoming) {
CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
CHECK_NE(NULL, descriptor);
- CHECK_EQ(1 + i, descriptor->ParameterCount());
+ CHECK_EQ(1 + i, descriptor->JSParameterCount());
CHECK_EQ(1, descriptor->ReturnCount());
CHECK_EQ(Operator::kNoProperties, descriptor->properties());
CHECK_EQ(true, descriptor->IsJSFunctionCall());
@@ -92,7 +92,7 @@ TEST(TestLinkageJSCall) {
for (int i = 0; i < 32; i++) {
CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i);
CHECK_NE(NULL, descriptor);
- CHECK_EQ(i, descriptor->ParameterCount());
+ CHECK_EQ(i, descriptor->JSParameterCount());
CHECK_EQ(1, descriptor->ReturnCount());
CHECK_EQ(Operator::kNoProperties, descriptor->properties());
CHECK_EQ(true, descriptor->IsJSFunctionCall());
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index c79a96a094..9a41bc5e44 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -6,48 +6,72 @@
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/typer.h"
#include "test/cctest/compiler/value-helper.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
template <typename T>
-Operator* NewConstantOperator(CommonOperatorBuilder* common, volatile T value);
+const Operator* NewConstantOperator(CommonOperatorBuilder* common,
+ volatile T value);
template <>
-Operator* NewConstantOperator<int32_t>(CommonOperatorBuilder* common,
- volatile int32_t value) {
+const Operator* NewConstantOperator<int32_t>(CommonOperatorBuilder* common,
+ volatile int32_t value) {
return common->Int32Constant(value);
}
template <>
-Operator* NewConstantOperator<double>(CommonOperatorBuilder* common,
- volatile double value) {
+const Operator* NewConstantOperator<double>(CommonOperatorBuilder* common,
+ volatile double value) {
return common->Float64Constant(value);
}
+template <typename T>
+T ValueOfOperator(const Operator* op);
+
+template <>
+int32_t ValueOfOperator<int32_t>(const Operator* op) {
+ CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
+ return OpParameter<int32_t>(op);
+}
+
+template <>
+double ValueOfOperator<double>(const Operator* op) {
+ CHECK_EQ(IrOpcode::kFloat64Constant, op->opcode());
+ return OpParameter<double>(op);
+}
+
+
class ReducerTester : public HandleAndZoneScope {
public:
explicit ReducerTester(int num_parameters = 0)
: isolate(main_isolate()),
binop(NULL),
unop(NULL),
- machine(main_zone()),
common(main_zone()),
graph(main_zone()),
+ javascript(main_zone()),
+ typer(main_zone()),
+ jsgraph(&graph, &common, &javascript, &typer, &machine),
maxuint32(Constant<int32_t>(kMaxUInt32)) {
Node* s = graph.NewNode(common.Start(num_parameters));
graph.SetStart(s);
}
Isolate* isolate;
- Operator* binop;
- Operator* unop;
+ const Operator* binop;
+ const Operator* unop;
MachineOperatorBuilder machine;
CommonOperatorBuilder common;
Graph graph;
+ JSOperatorBuilder javascript;
+ Typer typer;
+ JSGraph jsgraph;
Node* maxuint32;
template <typename T>
@@ -55,6 +79,11 @@ class ReducerTester : public HandleAndZoneScope {
return graph.NewNode(NewConstantOperator<T>(&common, value));
}
+ template <typename T>
+ const T ValueOf(const Operator* op) {
+ return ValueOfOperator<T>(op);
+ }
+
// Check that the reduction of this binop applied to constants {a} and {b}
// yields the {expect} value.
template <typename T>
@@ -68,7 +97,7 @@ class ReducerTester : public HandleAndZoneScope {
void CheckFoldBinop(volatile T expect, Node* a, Node* b) {
CHECK_NE(NULL, binop);
Node* n = graph.NewNode(binop, a, b);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_NE(n, reduction.replacement());
@@ -80,7 +109,7 @@ class ReducerTester : public HandleAndZoneScope {
void CheckBinop(Node* expect, Node* a, Node* b) {
CHECK_NE(NULL, binop);
Node* n = graph.NewNode(binop, a, b);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_EQ(expect, reduction.replacement());
@@ -92,7 +121,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* right) {
CHECK_NE(NULL, binop);
Node* n = graph.NewNode(binop, left, right);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_EQ(binop, reduction.replacement()->op());
@@ -103,11 +132,11 @@ class ReducerTester : public HandleAndZoneScope {
// Check that the reduction of this binop applied to {left} and {right} yields
// the {op_expect} applied to {left_expect} and {right_expect}.
template <typename T>
- void CheckFoldBinop(volatile T left_expect, Operator* op_expect,
+ void CheckFoldBinop(volatile T left_expect, const Operator* op_expect,
Node* right_expect, Node* left, Node* right) {
CHECK_NE(NULL, binop);
Node* n = graph.NewNode(binop, left, right);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction r = reducer.Reduce(n);
CHECK(r.Changed());
CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
@@ -118,11 +147,11 @@ class ReducerTester : public HandleAndZoneScope {
// Check that the reduction of this binop applied to {left} and {right} yields
// the {op_expect} applied to {left_expect} and {right_expect}.
template <typename T>
- void CheckFoldBinop(Node* left_expect, Operator* op_expect,
+ void CheckFoldBinop(Node* left_expect, const Operator* op_expect,
volatile T right_expect, Node* left, Node* right) {
CHECK_NE(NULL, binop);
Node* n = graph.NewNode(binop, left, right);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction r = reducer.Reduce(n);
CHECK(r.Changed());
CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
@@ -139,7 +168,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* k = Constant<T>(constant);
{
Node* n = graph.NewNode(binop, k, p);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed() || reduction.replacement() == n);
CHECK_EQ(p, n->InputAt(0));
@@ -147,7 +176,7 @@ class ReducerTester : public HandleAndZoneScope {
}
{
Node* n = graph.NewNode(binop, p, k);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed());
CHECK_EQ(p, n->InputAt(0));
@@ -163,7 +192,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* p = Parameter();
Node* k = Constant<T>(constant);
Node* n = graph.NewNode(binop, k, p);
- MachineOperatorReducer reducer(&graph);
+ MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed());
CHECK_EQ(k, n->InputAt(0));
@@ -630,18 +659,19 @@ TEST(ReduceLoadStore) {
Node* base = R.Constant<int32_t>(11);
Node* index = R.Constant<int32_t>(4);
- Node* load = R.graph.NewNode(R.machine.Load(kMachineWord32), base, index);
+ Node* load = R.graph.NewNode(R.machine.Load(kMachInt32), base, index);
{
- MachineOperatorReducer reducer(&R.graph);
+ MachineOperatorReducer reducer(&R.jsgraph);
Reduction reduction = reducer.Reduce(load);
CHECK(!reduction.Changed()); // loads should not be reduced.
}
{
Node* store = R.graph.NewNode(
- R.machine.Store(kMachineWord32, kNoWriteBarrier), base, index, load);
- MachineOperatorReducer reducer(&R.graph);
+ R.machine.Store(StoreRepresentation(kMachInt32, kNoWriteBarrier)), base,
+ index, load);
+ MachineOperatorReducer reducer(&R.jsgraph);
Reduction reduction = reducer.Reduce(store);
CHECK(!reduction.Changed()); // stores should not be reduced.
}
@@ -657,9 +687,9 @@ static void CheckNans(ReducerTester* R) {
pr != nans.end(); ++pr) {
Node* nan1 = R->Constant<double>(*pl);
Node* nan2 = R->Constant<double>(*pr);
- R->CheckBinop(nan1, x, nan1); // x % NaN => NaN
- R->CheckBinop(nan1, nan1, x); // NaN % x => NaN
- R->CheckBinop(nan1, nan2, nan1); // NaN % NaN => NaN
+ R->CheckBinop(nan1, x, nan1); // x op NaN => NaN
+ R->CheckBinop(nan1, nan1, x); // NaN op x => NaN
+ R->CheckBinop(nan1, nan2, nan1); // NaN op NaN => NaN
}
}
}
@@ -676,8 +706,15 @@ TEST(ReduceFloat64Add) {
}
}
- FOR_FLOAT64_INPUTS(i) { R.CheckPutConstantOnRight(*i); }
- // TODO(titzer): CheckNans(&R);
+ FOR_FLOAT64_INPUTS(i) {
+ Double tmp(*i);
+ if (!tmp.IsSpecial() || tmp.IsInfinite()) {
+ // Don't check NaNs as they are reduced more.
+ R.CheckPutConstantOnRight(*i);
+ }
+ }
+
+ CheckNans(&R);
}
@@ -691,7 +728,13 @@ TEST(ReduceFloat64Sub) {
R.CheckFoldBinop<double>(x - y, x, y);
}
}
- // TODO(titzer): CheckNans(&R);
+
+ Node* zero = R.Constant<double>(0.0);
+ Node* x = R.Parameter();
+
+ R.CheckBinop(x, x, zero); // x - 0.0 => x
+
+ CheckNans(&R);
}
@@ -753,6 +796,11 @@ TEST(ReduceFloat64Mod) {
}
}
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<double>(0.0);
+
+ R.CheckFoldBinop<double>(v8::base::OS::nan_value(), x, zero);
+
CheckNans(&R);
}
diff --git a/deps/v8/test/cctest/compiler/test-node-cache.cc b/deps/v8/test/cctest/compiler/test-node-cache.cc
index 23909a5f5a..3569386c85 100644
--- a/deps/v8/test/cctest/compiler/test-node-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-node-cache.cc
@@ -33,15 +33,15 @@ TEST(Int32Constant_five) {
int32_t constants[] = {static_cast<int32_t>(0x80000000), -77, 0, 1, -1};
- Node* nodes[ARRAY_SIZE(constants)];
+ Node* nodes[arraysize(constants)];
- for (size_t i = 0; i < ARRAY_SIZE(constants); i++) {
+ for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
Node* node = graph.NewNode(common.Int32Constant(k));
*cache.Find(graph.zone(), k) = nodes[i] = node;
}
- for (size_t i = 0; i < ARRAY_SIZE(constants); i++) {
+ for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
CHECK_EQ(nodes[i], *cache.Find(graph.zone(), k));
}
@@ -121,7 +121,7 @@ TEST(PtrConstant_back_to_back) {
int32_t buffer[50];
for (int32_t* p = buffer;
- (p - buffer) < static_cast<ptrdiff_t>(ARRAY_SIZE(buffer)); p++) {
+ (p - buffer) < static_cast<ptrdiff_t>(arraysize(buffer)); p++) {
Node** pos = cache.Find(graph.zone(), p);
CHECK_NE(NULL, pos);
for (int j = 0; j < 3; j++) {
@@ -140,7 +140,7 @@ TEST(PtrConstant_hits) {
Node* nodes[kSize];
CommonOperatorBuilder common(graph.zone());
- for (size_t i = 0; i < ARRAY_SIZE(buffer); i++) {
+ for (size_t i = 0; i < arraysize(buffer); i++) {
int k = static_cast<int>(i);
int32_t* p = &buffer[i];
nodes[i] = graph.NewNode(common.Int32Constant(k));
@@ -148,7 +148,7 @@ TEST(PtrConstant_hits) {
}
int hits = 0;
- for (size_t i = 0; i < ARRAY_SIZE(buffer); i++) {
+ for (size_t i = 0; i < arraysize(buffer); i++) {
int32_t* p = &buffer[i];
Node** pos = cache.Find(graph.zone(), p);
if (*pos != NULL) {
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index 6fe8573a2f..28d807e4a7 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -118,9 +118,14 @@ TEST(NodeUseIteratorOne) {
TEST(NodeUseIteratorReplaceNoUses) {
GraphTester graph;
Node* n0 = graph.NewNode(&dummy_operator);
- Node* n3 = graph.NewNode(&dummy_operator);
- n0->ReplaceUses(n3);
+ Node* n1 = graph.NewNode(&dummy_operator);
+ Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n3 = graph.NewNode(&dummy_operator, n2);
+ n0->ReplaceUses(n1);
+ CHECK(n0->uses().begin() == n0->uses().end());
+ n0->ReplaceUses(n2);
CHECK(n0->uses().begin() == n0->uses().end());
+ USE(n3);
}
@@ -333,6 +338,27 @@ TEST(Inputs) {
}
+TEST(RemoveInput) {
+ GraphTester graph;
+
+ Node* n0 = graph.NewNode(&dummy_operator);
+ Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+ n1->RemoveInput(0);
+ CHECK_EQ(0, n1->InputCount());
+ CHECK_EQ(1, n0->UseCount());
+
+ n2->RemoveInput(0);
+ CHECK_EQ(1, n2->InputCount());
+ CHECK_EQ(0, n0->UseCount());
+ CHECK_EQ(1, n1->UseCount());
+
+ n2->RemoveInput(0);
+ CHECK_EQ(0, n2->InputCount());
+}
+
+
TEST(AppendInputsAndIterator) {
GraphTester graph;
diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc
index 0bf8cb755b..af75d67663 100644
--- a/deps/v8/test/cctest/compiler/test-operator.cc
+++ b/deps/v8/test/cctest/compiler/test-operator.cc
@@ -14,40 +14,40 @@ using namespace v8::internal::compiler;
#define Infinity (std::numeric_limits<double>::infinity())
TEST(TestOperatorMnemonic) {
- SimpleOperator op1(10, 0, 0, 0, "ThisOne");
+ SimpleOperator op1(10, Operator::kNoProperties, 0, 0, "ThisOne");
CHECK_EQ(0, strcmp(op1.mnemonic(), "ThisOne"));
- SimpleOperator op2(11, 0, 0, 0, "ThatOne");
+ SimpleOperator op2(11, Operator::kNoProperties, 0, 0, "ThatOne");
CHECK_EQ(0, strcmp(op2.mnemonic(), "ThatOne"));
- Operator1<int> op3(12, 0, 0, 1, "Mnemonic1", 12333);
+ Operator1<int> op3(12, Operator::kNoProperties, 0, 1, "Mnemonic1", 12333);
CHECK_EQ(0, strcmp(op3.mnemonic(), "Mnemonic1"));
- Operator1<double> op4(13, 0, 0, 1, "TheOther", 99.9);
+ Operator1<double> op4(13, Operator::kNoProperties, 0, 1, "TheOther", 99.9);
CHECK_EQ(0, strcmp(op4.mnemonic(), "TheOther"));
}
TEST(TestSimpleOperatorHash) {
- SimpleOperator op1(17, 0, 0, 0, "Another");
+ SimpleOperator op1(17, Operator::kNoProperties, 0, 0, "Another");
CHECK_EQ(17, op1.HashCode());
- SimpleOperator op2(18, 0, 0, 0, "Falsch");
+ SimpleOperator op2(18, Operator::kNoProperties, 0, 0, "Falsch");
CHECK_EQ(18, op2.HashCode());
}
TEST(TestSimpleOperatorEquals) {
- SimpleOperator op1a(19, 0, 0, 0, "Another1");
- SimpleOperator op1b(19, 2, 2, 2, "Another2");
+ SimpleOperator op1a(19, Operator::kNoProperties, 0, 0, "Another1");
+ SimpleOperator op1b(19, Operator::kFoldable, 2, 2, "Another2");
CHECK(op1a.Equals(&op1a));
CHECK(op1a.Equals(&op1b));
CHECK(op1b.Equals(&op1a));
CHECK(op1b.Equals(&op1b));
- SimpleOperator op2a(20, 0, 0, 0, "Falsch1");
- SimpleOperator op2b(20, 1, 1, 1, "Falsch2");
+ SimpleOperator op2a(20, Operator::kNoProperties, 0, 0, "Falsch1");
+ SimpleOperator op2b(20, Operator::kFoldable, 1, 1, "Falsch2");
CHECK(op2a.Equals(&op2a));
CHECK(op2a.Equals(&op2b));
@@ -74,14 +74,14 @@ static SmartArrayPointer<const char> OperatorToString(Operator* op) {
TEST(TestSimpleOperatorPrint) {
- SimpleOperator op1a(19, 0, 0, 0, "Another1");
- SimpleOperator op1b(19, 2, 2, 2, "Another2");
+ SimpleOperator op1a(19, Operator::kNoProperties, 0, 0, "Another1");
+ SimpleOperator op1b(19, Operator::kFoldable, 2, 2, "Another2");
CHECK_EQ("Another1", OperatorToString(&op1a).get());
CHECK_EQ("Another2", OperatorToString(&op1b).get());
- SimpleOperator op2a(20, 0, 0, 0, "Flog1");
- SimpleOperator op2b(20, 1, 1, 1, "Flog2");
+ SimpleOperator op2a(20, Operator::kNoProperties, 0, 0, "Flog1");
+ SimpleOperator op2b(20, Operator::kFoldable, 1, 1, "Flog2");
CHECK_EQ("Flog1", OperatorToString(&op2a).get());
CHECK_EQ("Flog2", OperatorToString(&op2b).get());
@@ -89,13 +89,13 @@ TEST(TestSimpleOperatorPrint) {
TEST(TestOperator1intHash) {
- Operator1<int> op1a(23, 0, 0, 0, "Wolfie", 11);
- Operator1<int> op1b(23, 2, 2, 2, "Doggie", 11);
+ Operator1<int> op1a(23, Operator::kNoProperties, 0, 0, "Wolfie", 11);
+ Operator1<int> op1b(23, Operator::kFoldable, 2, 2, "Doggie", 11);
CHECK_EQ(op1a.HashCode(), op1b.HashCode());
- Operator1<int> op2a(24, 0, 0, 0, "Arfie", 3);
- Operator1<int> op2b(24, 0, 0, 0, "Arfie", 4);
+ Operator1<int> op2a(24, Operator::kNoProperties, 0, 0, "Arfie", 3);
+ Operator1<int> op2b(24, Operator::kNoProperties, 0, 0, "Arfie", 4);
CHECK_NE(op1a.HashCode(), op2a.HashCode());
CHECK_NE(op2a.HashCode(), op2b.HashCode());
@@ -103,16 +103,16 @@ TEST(TestOperator1intHash) {
TEST(TestOperator1intEquals) {
- Operator1<int> op1a(23, 0, 0, 0, "Scratchy", 11);
- Operator1<int> op1b(23, 2, 2, 2, "Scratchy", 11);
+ Operator1<int> op1a(23, Operator::kNoProperties, 0, 0, "Scratchy", 11);
+ Operator1<int> op1b(23, Operator::kFoldable, 2, 2, "Scratchy", 11);
CHECK(op1a.Equals(&op1a));
CHECK(op1a.Equals(&op1b));
CHECK(op1b.Equals(&op1a));
CHECK(op1b.Equals(&op1b));
- Operator1<int> op2a(24, 0, 0, 0, "Im", 3);
- Operator1<int> op2b(24, 0, 0, 0, "Im", 4);
+ Operator1<int> op2a(24, Operator::kNoProperties, 0, 0, "Im", 3);
+ Operator1<int> op2b(24, Operator::kNoProperties, 0, 0, "Im", 4);
CHECK(op2a.Equals(&op2a));
CHECK(!op2a.Equals(&op2b));
@@ -129,7 +129,7 @@ TEST(TestOperator1intEquals) {
CHECK(!op2b.Equals(&op1a));
CHECK(!op2b.Equals(&op1b));
- SimpleOperator op3(25, 0, 0, 0, "Weepy");
+ SimpleOperator op3(25, Operator::kNoProperties, 0, 0, "Weepy");
CHECK(!op1a.Equals(&op3));
CHECK(!op1b.Equals(&op3));
@@ -144,28 +144,28 @@ TEST(TestOperator1intEquals) {
TEST(TestOperator1intPrint) {
- Operator1<int> op1(12, 0, 0, 1, "Op1Test", 0);
+ Operator1<int> op1(12, Operator::kNoProperties, 0, 1, "Op1Test", 0);
CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
- Operator1<int> op2(12, 0, 0, 1, "Op1Test", 66666666);
+ Operator1<int> op2(12, Operator::kNoProperties, 0, 1, "Op1Test", 66666666);
CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get());
- Operator1<int> op3(12, 0, 0, 1, "FooBar", 2347);
+ Operator1<int> op3(12, Operator::kNoProperties, 0, 1, "FooBar", 2347);
CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get());
- Operator1<int> op4(12, 0, 0, 1, "BarFoo", -879);
+ Operator1<int> op4(12, Operator::kNoProperties, 0, 1, "BarFoo", -879);
CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get());
}
TEST(TestOperator1doubleHash) {
- Operator1<double> op1a(23, 0, 0, 0, "Wolfie", 11.77);
- Operator1<double> op1b(23, 2, 2, 2, "Doggie", 11.77);
+ Operator1<double> op1a(23, Operator::kNoProperties, 0, 0, "Wolfie", 11.77);
+ Operator1<double> op1b(23, Operator::kFoldable, 2, 2, "Doggie", 11.77);
CHECK_EQ(op1a.HashCode(), op1b.HashCode());
- Operator1<double> op2a(24, 0, 0, 0, "Arfie", -6.7);
- Operator1<double> op2b(24, 0, 0, 0, "Arfie", -6.8);
+ Operator1<double> op2a(24, Operator::kNoProperties, 0, 0, "Arfie", -6.7);
+ Operator1<double> op2b(24, Operator::kNoProperties, 0, 0, "Arfie", -6.8);
CHECK_NE(op1a.HashCode(), op2a.HashCode());
CHECK_NE(op2a.HashCode(), op2b.HashCode());
@@ -173,16 +173,16 @@ TEST(TestOperator1doubleHash) {
TEST(TestOperator1doubleEquals) {
- Operator1<double> op1a(23, 0, 0, 0, "Scratchy", 11.77);
- Operator1<double> op1b(23, 2, 2, 2, "Scratchy", 11.77);
+ Operator1<double> op1a(23, Operator::kNoProperties, 0, 0, "Scratchy", 11.77);
+ Operator1<double> op1b(23, Operator::kFoldable, 2, 2, "Scratchy", 11.77);
CHECK(op1a.Equals(&op1a));
CHECK(op1a.Equals(&op1b));
CHECK(op1b.Equals(&op1a));
CHECK(op1b.Equals(&op1b));
- Operator1<double> op2a(24, 0, 0, 0, "Im", 3.1);
- Operator1<double> op2b(24, 0, 0, 0, "Im", 3.2);
+ Operator1<double> op2a(24, Operator::kNoProperties, 0, 0, "Im", 3.1);
+ Operator1<double> op2b(24, Operator::kNoProperties, 0, 0, "Im", 3.2);
CHECK(op2a.Equals(&op2a));
CHECK(!op2a.Equals(&op2b));
@@ -199,7 +199,7 @@ TEST(TestOperator1doubleEquals) {
CHECK(!op2b.Equals(&op1a));
CHECK(!op2b.Equals(&op1b));
- SimpleOperator op3(25, 0, 0, 0, "Weepy");
+ SimpleOperator op3(25, Operator::kNoProperties, 0, 0, "Weepy");
CHECK(!op1a.Equals(&op3));
CHECK(!op1b.Equals(&op3));
@@ -211,8 +211,8 @@ TEST(TestOperator1doubleEquals) {
CHECK(!op3.Equals(&op2a));
CHECK(!op3.Equals(&op2b));
- Operator1<double> op4a(24, 0, 0, 0, "Bashful", NaN);
- Operator1<double> op4b(24, 0, 0, 0, "Bashful", NaN);
+ Operator1<double> op4a(24, Operator::kNoProperties, 0, 0, "Bashful", NaN);
+ Operator1<double> op4b(24, Operator::kNoProperties, 0, 0, "Bashful", NaN);
CHECK(op4a.Equals(&op4a));
CHECK(op4a.Equals(&op4b));
@@ -227,18 +227,18 @@ TEST(TestOperator1doubleEquals) {
TEST(TestOperator1doublePrint) {
- Operator1<double> op1(12, 0, 0, 1, "Op1Test", 0);
+ Operator1<double> op1(12, Operator::kNoProperties, 0, 1, "Op1Test", 0);
CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
- Operator1<double> op2(12, 0, 0, 1, "Op1Test", 7.3);
+ Operator1<double> op2(12, Operator::kNoProperties, 0, 1, "Op1Test", 7.3);
CHECK_EQ("Op1Test[7.3]", OperatorToString(&op2).get());
- Operator1<double> op3(12, 0, 0, 1, "FooBar", 2e+123);
+ Operator1<double> op3(12, Operator::kNoProperties, 0, 1, "FooBar", 2e+123);
CHECK_EQ("FooBar[2e+123]", OperatorToString(&op3).get());
- Operator1<double> op4(12, 0, 0, 1, "BarFoo", Infinity);
+ Operator1<double> op4(12, Operator::kNoProperties, 0, 1, "BarFoo", Infinity);
CHECK_EQ("BarFoo[inf]", OperatorToString(&op4).get());
- Operator1<double> op5(12, 0, 0, 1, "BarFoo", NaN);
+ Operator1<double> op5(12, Operator::kNoProperties, 0, 1, "BarFoo", NaN);
CHECK_EQ("BarFoo[nan]", OperatorToString(&op5).get());
}
diff --git a/deps/v8/test/cctest/compiler/test-phi-reducer.cc b/deps/v8/test/cctest/compiler/test-phi-reducer.cc
index 00e250d8a2..7d2fab6727 100644
--- a/deps/v8/test/cctest/compiler/test-phi-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-phi-reducer.cc
@@ -53,27 +53,32 @@ class PhiReducerTester : HandleAndZoneScope {
}
Node* Phi(Node* a) {
- return SetSelfReferences(graph.NewNode(common.Phi(1), a));
+ return SetSelfReferences(graph.NewNode(common.Phi(kMachAnyTagged, 1), a));
}
Node* Phi(Node* a, Node* b) {
- return SetSelfReferences(graph.NewNode(common.Phi(2), a, b));
+ return SetSelfReferences(
+ graph.NewNode(common.Phi(kMachAnyTagged, 2), a, b));
}
Node* Phi(Node* a, Node* b, Node* c) {
- return SetSelfReferences(graph.NewNode(common.Phi(3), a, b, c));
+ return SetSelfReferences(
+ graph.NewNode(common.Phi(kMachAnyTagged, 3), a, b, c));
}
Node* Phi(Node* a, Node* b, Node* c, Node* d) {
- return SetSelfReferences(graph.NewNode(common.Phi(4), a, b, c, d));
+ return SetSelfReferences(
+ graph.NewNode(common.Phi(kMachAnyTagged, 4), a, b, c, d));
}
Node* PhiWithControl(Node* a, Node* control) {
- return SetSelfReferences(graph.NewNode(common.Phi(1), a, control));
+ return SetSelfReferences(
+ graph.NewNode(common.Phi(kMachAnyTagged, 1), a, control));
}
Node* PhiWithControl(Node* a, Node* b, Node* control) {
- return SetSelfReferences(graph.NewNode(common.Phi(2), a, b, control));
+ return SetSelfReferences(
+ graph.NewNode(common.Phi(kMachAnyTagged, 2), a, b, control));
}
Node* SetSelfReferences(Node* node) {
@@ -96,7 +101,7 @@ TEST(PhiReduce1) {
Node* param = R.Parameter();
Node* singles[] = {zero, one, oneish, param};
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
R.CheckReduce(singles[i], R.Phi(singles[i]));
}
}
@@ -110,18 +115,18 @@ TEST(PhiReduce2) {
Node* param = R.Parameter();
Node* singles[] = {zero, one, oneish, param};
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
Node* a = singles[i];
R.CheckReduce(a, R.Phi(a, a));
}
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
Node* a = singles[i];
R.CheckReduce(a, R.Phi(R.self, a));
R.CheckReduce(a, R.Phi(a, R.self));
}
- for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 1; i < arraysize(singles); i++) {
Node* a = singles[i], *b = singles[0];
Node* phi1 = R.Phi(b, a);
R.CheckReduce(phi1, phi1);
@@ -140,19 +145,19 @@ TEST(PhiReduce3) {
Node* param = R.Parameter();
Node* singles[] = {zero, one, oneish, param};
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
Node* a = singles[i];
R.CheckReduce(a, R.Phi(a, a, a));
}
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
Node* a = singles[i];
R.CheckReduce(a, R.Phi(R.self, a, a));
R.CheckReduce(a, R.Phi(a, R.self, a));
R.CheckReduce(a, R.Phi(a, a, R.self));
}
- for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 1; i < arraysize(singles); i++) {
Node* a = singles[i], *b = singles[0];
Node* phi1 = R.Phi(b, a, a);
R.CheckReduce(phi1, phi1);
@@ -174,12 +179,12 @@ TEST(PhiReduce4) {
Node* param = R.Parameter();
Node* singles[] = {zero, one, oneish, param};
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
Node* a = singles[i];
R.CheckReduce(a, R.Phi(a, a, a, a));
}
- for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 0; i < arraysize(singles); i++) {
Node* a = singles[i];
R.CheckReduce(a, R.Phi(R.self, a, a, a));
R.CheckReduce(a, R.Phi(a, R.self, a, a));
@@ -192,7 +197,7 @@ TEST(PhiReduce4) {
R.CheckReduce(a, R.Phi(R.self, a, a, R.self));
}
- for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+ for (size_t i = 1; i < arraysize(singles); i++) {
Node* a = singles[i], *b = singles[0];
Node* phi1 = R.Phi(b, a, a, a);
R.CheckReduce(phi1, phi1);
@@ -217,7 +222,7 @@ TEST(PhiReduceShouldIgnoreControlNodes) {
Node* param = R.Parameter();
Node* singles[] = {zero, one, oneish, param};
- for (size_t i = 0; i < ARRAY_SIZE(singles); ++i) {
+ for (size_t i = 0; i < arraysize(singles); ++i) {
R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.dead));
R.CheckReduce(singles[i], R.PhiWithControl(R.self, singles[i], R.dead));
R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.self, R.dead));
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
index 7efedeeea2..f0b750a0cc 100644
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ b/deps/v8/test/cctest/compiler/test-pipeline.cc
@@ -23,8 +23,6 @@ TEST(PipelineAdd) {
CompilationInfoWithZone info(function);
CHECK(Parser::Parse(&info));
- StrictMode strict_mode = info.function()->strict_mode();
- info.SetStrictMode(strict_mode);
CHECK(Rewriter::Rewrite(&info));
CHECK(Scope::Analyze(&info));
CHECK_NE(NULL, info.scope());
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 092a5f7d90..9bf3b371f9 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -7,6 +7,7 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/representation-change.h"
@@ -25,13 +26,16 @@ class RepresentationChangerTester : public HandleAndZoneScope,
explicit RepresentationChangerTester(int num_parameters = 0)
: GraphAndBuilders(main_zone()),
typer_(main_zone()),
- jsgraph_(main_graph_, &main_common_, &typer_),
- changer_(&jsgraph_, &main_simplified_, &main_machine_, main_isolate()) {
+ javascript_(main_zone()),
+ jsgraph_(main_graph_, &main_common_, &javascript_, &typer_,
+ &main_machine_),
+ changer_(&jsgraph_, &main_simplified_, main_isolate()) {
Node* s = graph()->NewNode(common()->Start(num_parameters));
graph()->SetStart(s);
}
Typer typer_;
+ JSOperatorBuilder javascript_;
JSGraph jsgraph_;
RepresentationChanger changer_;
@@ -43,19 +47,37 @@ class RepresentationChangerTester : public HandleAndZoneScope,
// TODO(titzer): use ValueChecker / ValueUtil
void CheckInt32Constant(Node* n, int32_t expected) {
- ValueMatcher<int32_t> m(n);
+ Int32Matcher m(n);
CHECK(m.HasValue());
CHECK_EQ(expected, m.Value());
}
- void CheckHeapConstant(Node* n, Object* expected) {
- ValueMatcher<Handle<Object> > m(n);
+ void CheckUint32Constant(Node* n, uint32_t expected) {
+ Uint32Matcher m(n);
CHECK(m.HasValue());
- CHECK_EQ(expected, *m.Value());
+ CHECK_EQ(static_cast<int>(expected), static_cast<int>(m.Value()));
+ }
+
+ void CheckFloat64Constant(Node* n, double expected) {
+ Float64Matcher m(n);
+ CHECK(m.HasValue());
+ CHECK_EQ(expected, m.Value());
+ }
+
+ void CheckFloat32Constant(Node* n, float expected) {
+ CHECK_EQ(IrOpcode::kFloat32Constant, n->opcode());
+ float fval = OpParameter<float>(n->op());
+ CHECK_EQ(expected, fval);
+ }
+
+ void CheckHeapConstant(Node* n, HeapObject* expected) {
+ HeapObjectMatcher<HeapObject> m(n);
+ CHECK(m.HasValue());
+ CHECK_EQ(expected, *m.Value().handle());
}
void CheckNumberConstant(Node* n, double expected) {
- ValueMatcher<double> m(n);
+ NumberMatcher m(n);
CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
CHECK(m.HasValue());
CHECK_EQ(expected, m.Value());
@@ -65,16 +87,16 @@ class RepresentationChangerTester : public HandleAndZoneScope,
return graph()->NewNode(common()->Parameter(index), graph()->start());
}
- void CheckTypeError(RepTypeUnion from, RepTypeUnion to) {
+ void CheckTypeError(MachineTypeUnion from, MachineTypeUnion to) {
changer()->testing_type_errors_ = true;
changer()->type_error_ = false;
Node* n = Parameter(0);
Node* c = changer()->GetRepresentationFor(n, from, to);
- CHECK_EQ(n, c);
CHECK(changer()->type_error_);
+ CHECK_EQ(n, c);
}
- void CheckNop(RepTypeUnion from, RepTypeUnion to) {
+ void CheckNop(MachineTypeUnion from, MachineTypeUnion to) {
Node* n = Parameter(0);
Node* c = changer()->GetRepresentationFor(n, from, to);
CHECK_EQ(n, c);
@@ -85,38 +107,21 @@ class RepresentationChangerTester : public HandleAndZoneScope,
} // namespace v8::internal::compiler
-static const RepType all_reps[] = {rBit, rWord32, rWord64, rFloat64, rTagged};
-
-
-// TODO(titzer): lift this to ValueHelper
-static const double double_inputs[] = {
- 0.0, -0.0, 1.0, -1.0, 0.1, 1.4, -1.7,
- 2, 5, 6, 982983, 888, -999.8, 3.1e7,
- -2e66, 2.3e124, -12e73, V8_INFINITY, -V8_INFINITY};
-
-
-static const int32_t int32_inputs[] = {
- 0, 1, -1,
- 2, 5, 6,
- 982983, 888, -999,
- 65535, static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0x80000000)};
-
-
-static const uint32_t uint32_inputs[] = {
- 0, 1, static_cast<uint32_t>(-1), 2, 5, 6,
- 982983, 888, static_cast<uint32_t>(-999), 65535, 0xFFFFFFFF, 0x80000000};
+static const MachineType all_reps[] = {kRepBit, kRepWord32, kRepWord64,
+ kRepFloat32, kRepFloat64, kRepTagged};
TEST(BoolToBit_constant) {
RepresentationChangerTester r;
Node* true_node = r.jsgraph()->TrueConstant();
- Node* true_bit = r.changer()->GetRepresentationFor(true_node, rTagged, rBit);
+ Node* true_bit =
+ r.changer()->GetRepresentationFor(true_node, kRepTagged, kRepBit);
r.CheckInt32Constant(true_bit, 1);
Node* false_node = r.jsgraph()->FalseConstant();
Node* false_bit =
- r.changer()->GetRepresentationFor(false_node, rTagged, rBit);
+ r.changer()->GetRepresentationFor(false_node, kRepTagged, kRepBit);
r.CheckInt32Constant(false_bit, 0);
}
@@ -126,7 +131,7 @@ TEST(BitToBool_constant) {
for (int i = -5; i < 5; i++) {
Node* node = r.jsgraph()->Int32Constant(i);
- Node* val = r.changer()->GetRepresentationFor(node, rBit, rTagged);
+ Node* val = r.changer()->GetRepresentationFor(node, kRepBit, kRepTagged);
r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
: r.isolate()->heap()->true_value());
}
@@ -136,28 +141,240 @@ TEST(BitToBool_constant) {
TEST(ToTagged_constant) {
RepresentationChangerTester r;
- for (size_t i = 0; i < ARRAY_SIZE(double_inputs); i++) {
- Node* n = r.jsgraph()->Float64Constant(double_inputs[i]);
- Node* c = r.changer()->GetRepresentationFor(n, rFloat64, rTagged);
- r.CheckNumberConstant(c, double_inputs[i]);
+ {
+ FOR_FLOAT64_INPUTS(i) {
+ Node* n = r.jsgraph()->Float64Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepTagged);
+ r.CheckNumberConstant(c, *i);
+ }
+ }
+
+ {
+ FOR_FLOAT64_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepTagged);
+ r.CheckNumberConstant(c, *i);
+ }
+ }
+
+ {
+ FOR_FLOAT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Float32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32, kRepTagged);
+ r.CheckNumberConstant(c, *i);
+ }
}
- for (size_t i = 0; i < ARRAY_SIZE(int32_inputs); i++) {
- Node* n = r.jsgraph()->Int32Constant(int32_inputs[i]);
- Node* c = r.changer()->GetRepresentationFor(n, rWord32 | tInt32, rTagged);
- r.CheckNumberConstant(c, static_cast<double>(int32_inputs[i]));
+ {
+ FOR_INT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
+ kRepTagged);
+ r.CheckNumberConstant(c, *i);
+ }
}
- for (size_t i = 0; i < ARRAY_SIZE(uint32_inputs); i++) {
- Node* n = r.jsgraph()->Int32Constant(uint32_inputs[i]);
- Node* c = r.changer()->GetRepresentationFor(n, rWord32 | tUint32, rTagged);
- r.CheckNumberConstant(c, static_cast<double>(uint32_inputs[i]));
+ {
+ FOR_UINT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
+ kRepTagged);
+ r.CheckNumberConstant(c, *i);
+ }
}
}
-static void CheckChange(IrOpcode::Value expected, RepTypeUnion from,
- RepTypeUnion to) {
+TEST(ToFloat64_constant) {
+ RepresentationChangerTester r;
+
+ {
+ FOR_FLOAT64_INPUTS(i) {
+ Node* n = r.jsgraph()->Float64Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepFloat64);
+ CHECK_EQ(n, c);
+ }
+ }
+
+ {
+ FOR_FLOAT64_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepTagged, kRepFloat64);
+ r.CheckFloat64Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_FLOAT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Float32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32, kRepFloat64);
+ r.CheckFloat64Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_INT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
+ kRepFloat64);
+ r.CheckFloat64Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_UINT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
+ kRepFloat64);
+ r.CheckFloat64Constant(c, *i);
+ }
+ }
+}
+
+
+static bool IsFloat32Int32(int32_t val) {
+ return val >= -(1 << 23) && val <= (1 << 23);
+}
+
+
+static bool IsFloat32Uint32(uint32_t val) { return val <= (1 << 23); }
+
+
+TEST(ToFloat32_constant) {
+ RepresentationChangerTester r;
+
+ {
+ FOR_FLOAT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Float32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32, kRepFloat32);
+ CHECK_EQ(n, c);
+ }
+ }
+
+ {
+ FOR_FLOAT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepTagged, kRepFloat32);
+ r.CheckFloat32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_FLOAT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Float64Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepFloat32);
+ r.CheckFloat32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_INT32_INPUTS(i) {
+ if (!IsFloat32Int32(*i)) continue;
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
+ kRepFloat32);
+ r.CheckFloat32Constant(c, static_cast<float>(*i));
+ }
+ }
+
+ {
+ FOR_UINT32_INPUTS(i) {
+ if (!IsFloat32Uint32(*i)) continue;
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
+ kRepFloat32);
+ r.CheckFloat32Constant(c, static_cast<float>(*i));
+ }
+ }
+}
+
+
+TEST(ToInt32_constant) {
+ RepresentationChangerTester r;
+
+ {
+ FOR_INT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
+ kRepWord32);
+ r.CheckInt32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_INT32_INPUTS(i) {
+ if (!IsFloat32Int32(*i)) continue;
+ Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32 | kTypeInt32,
+ kRepWord32);
+ r.CheckInt32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_INT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Float64Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64 | kTypeInt32,
+ kRepWord32);
+ r.CheckInt32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_INT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepTagged | kTypeInt32,
+ kRepWord32);
+ r.CheckInt32Constant(c, *i);
+ }
+ }
+}
+
+
+TEST(ToUint32_constant) {
+ RepresentationChangerTester r;
+
+ {
+ FOR_UINT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
+ kRepWord32);
+ r.CheckUint32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_UINT32_INPUTS(i) {
+ if (!IsFloat32Uint32(*i)) continue;
+ Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32 | kTypeUint32,
+ kRepWord32);
+ r.CheckUint32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_UINT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Float64Constant(*i);
+ Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64 | kTypeUint32,
+ kRepWord32);
+ r.CheckUint32Constant(c, *i);
+ }
+ }
+
+ {
+ FOR_UINT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(static_cast<double>(*i));
+ Node* c = r.changer()->GetRepresentationFor(n, kRepTagged | kTypeUint32,
+ kRepWord32);
+ r.CheckUint32Constant(c, *i);
+ }
+ }
+}
+
+
+static void CheckChange(IrOpcode::Value expected, MachineTypeUnion from,
+ MachineTypeUnion to) {
RepresentationChangerTester r;
Node* n = r.Parameter();
@@ -169,34 +386,86 @@ static void CheckChange(IrOpcode::Value expected, RepTypeUnion from,
}
+static void CheckTwoChanges(IrOpcode::Value expected2,
+ IrOpcode::Value expected1, MachineTypeUnion from,
+ MachineTypeUnion to) {
+ RepresentationChangerTester r;
+
+ Node* n = r.Parameter();
+ Node* c1 = r.changer()->GetRepresentationFor(n, from, to);
+
+ CHECK_NE(c1, n);
+ CHECK_EQ(expected1, c1->opcode());
+ Node* c2 = c1->InputAt(0);
+ CHECK_NE(c2, n);
+ CHECK_EQ(expected2, c2->opcode());
+ CHECK_EQ(n, c2->InputAt(0));
+}
+
+
TEST(SingleChanges) {
- CheckChange(IrOpcode::kChangeBoolToBit, rTagged, rBit);
- CheckChange(IrOpcode::kChangeBitToBool, rBit, rTagged);
+ CheckChange(IrOpcode::kChangeBoolToBit, kRepTagged, kRepBit);
+ CheckChange(IrOpcode::kChangeBitToBool, kRepBit, kRepTagged);
- CheckChange(IrOpcode::kChangeInt32ToTagged, rWord32 | tInt32, rTagged);
- CheckChange(IrOpcode::kChangeUint32ToTagged, rWord32 | tUint32, rTagged);
- CheckChange(IrOpcode::kChangeFloat64ToTagged, rFloat64, rTagged);
+ CheckChange(IrOpcode::kChangeInt32ToTagged, kRepWord32 | kTypeInt32,
+ kRepTagged);
+ CheckChange(IrOpcode::kChangeUint32ToTagged, kRepWord32 | kTypeUint32,
+ kRepTagged);
+ CheckChange(IrOpcode::kChangeFloat64ToTagged, kRepFloat64, kRepTagged);
- CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged | tInt32, rWord32);
- CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged | tUint32, rWord32);
- CheckChange(IrOpcode::kChangeTaggedToFloat64, rTagged, rFloat64);
+ CheckChange(IrOpcode::kChangeTaggedToInt32, kRepTagged | kTypeInt32,
+ kRepWord32);
+ CheckChange(IrOpcode::kChangeTaggedToUint32, kRepTagged | kTypeUint32,
+ kRepWord32);
+ CheckChange(IrOpcode::kChangeTaggedToFloat64, kRepTagged, kRepFloat64);
// Int32,Uint32 <-> Float64 are actually machine conversions.
- CheckChange(IrOpcode::kChangeInt32ToFloat64, rWord32 | tInt32, rFloat64);
- CheckChange(IrOpcode::kChangeUint32ToFloat64, rWord32 | tUint32, rFloat64);
- CheckChange(IrOpcode::kChangeFloat64ToInt32, rFloat64 | tInt32, rWord32);
- CheckChange(IrOpcode::kChangeFloat64ToUint32, rFloat64 | tUint32, rWord32);
+ CheckChange(IrOpcode::kChangeInt32ToFloat64, kRepWord32 | kTypeInt32,
+ kRepFloat64);
+ CheckChange(IrOpcode::kChangeUint32ToFloat64, kRepWord32 | kTypeUint32,
+ kRepFloat64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt32, kRepFloat64 | kTypeInt32,
+ kRepWord32);
+ CheckChange(IrOpcode::kChangeFloat64ToUint32, kRepFloat64 | kTypeUint32,
+ kRepWord32);
+
+ // Int32,Uint32 <-> Float32 require two changes.
+ CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
+ IrOpcode::kTruncateFloat64ToFloat32, kRepWord32 | kTypeInt32,
+ kRepFloat32);
+ CheckTwoChanges(IrOpcode::kChangeUint32ToFloat64,
+ IrOpcode::kTruncateFloat64ToFloat32, kRepWord32 | kTypeUint32,
+ kRepFloat32);
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToInt32, kRepFloat32 | kTypeInt32,
+ kRepWord32);
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToUint32, kRepFloat32 | kTypeUint32,
+ kRepWord32);
+
+ // Float32 <-> Tagged require two changes.
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToTagged, kRepFloat32, kRepTagged);
+ CheckTwoChanges(IrOpcode::kChangeTaggedToFloat64,
+ IrOpcode::kTruncateFloat64ToFloat32, kRepTagged, kRepFloat32);
}
TEST(SignednessInWord32) {
RepresentationChangerTester r;
- // TODO(titzer): assume that uses of a word32 without a sign mean tInt32.
- CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged, rWord32 | tInt32);
- CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged, rWord32 | tUint32);
- CheckChange(IrOpcode::kChangeInt32ToFloat64, rWord32, rFloat64);
- CheckChange(IrOpcode::kChangeFloat64ToInt32, rFloat64, rWord32);
+ // TODO(titzer): assume that uses of a word32 without a sign mean kTypeInt32.
+ CheckChange(IrOpcode::kChangeTaggedToInt32, kRepTagged,
+ kRepWord32 | kTypeInt32);
+ CheckChange(IrOpcode::kChangeTaggedToUint32, kRepTagged,
+ kRepWord32 | kTypeUint32);
+ CheckChange(IrOpcode::kChangeInt32ToFloat64, kRepWord32, kRepFloat64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt32, kRepFloat64, kRepWord32);
+
+ CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
+ IrOpcode::kTruncateFloat64ToFloat32, kRepWord32, kRepFloat32);
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToInt32, kRepFloat32, kRepWord32);
}
@@ -204,21 +473,39 @@ TEST(Nops) {
RepresentationChangerTester r;
// X -> X is always a nop for any single representation X.
- for (size_t i = 0; i < ARRAY_SIZE(all_reps); i++) {
+ for (size_t i = 0; i < arraysize(all_reps); i++) {
r.CheckNop(all_reps[i], all_reps[i]);
}
- // 32-bit or 64-bit words can be used as branch conditions (rBit).
- r.CheckNop(rWord32, rBit);
- r.CheckNop(rWord32, rBit | tBool);
- r.CheckNop(rWord64, rBit);
- r.CheckNop(rWord64, rBit | tBool);
-
- // rBit (result of comparison) is implicitly a wordish thing.
- r.CheckNop(rBit, rWord32);
- r.CheckNop(rBit | tBool, rWord32);
- r.CheckNop(rBit, rWord64);
- r.CheckNop(rBit | tBool, rWord64);
+ // 32-bit floats.
+ r.CheckNop(kRepFloat32, kRepFloat32);
+ r.CheckNop(kRepFloat32 | kTypeNumber, kRepFloat32);
+ r.CheckNop(kRepFloat32, kRepFloat32 | kTypeNumber);
+
+ // 32-bit or 64-bit words can be used as branch conditions (kRepBit).
+ r.CheckNop(kRepWord32, kRepBit);
+ r.CheckNop(kRepWord32, kRepBit | kTypeBool);
+ r.CheckNop(kRepWord64, kRepBit);
+ r.CheckNop(kRepWord64, kRepBit | kTypeBool);
+
+ // 32-bit words can be used as smaller word sizes and vice versa, because
+ // loads from memory implicitly sign or zero extend the value to the
+ // full machine word size, and stores implicitly truncate.
+ r.CheckNop(kRepWord32, kRepWord8);
+ r.CheckNop(kRepWord32, kRepWord16);
+ r.CheckNop(kRepWord32, kRepWord32);
+ r.CheckNop(kRepWord8, kRepWord32);
+ r.CheckNop(kRepWord16, kRepWord32);
+
+ // kRepBit (result of comparison) is implicitly a wordish thing.
+ r.CheckNop(kRepBit, kRepWord8);
+ r.CheckNop(kRepBit | kTypeBool, kRepWord8);
+ r.CheckNop(kRepBit, kRepWord16);
+ r.CheckNop(kRepBit | kTypeBool, kRepWord16);
+ r.CheckNop(kRepBit, kRepWord32);
+ r.CheckNop(kRepBit | kTypeBool, kRepWord32);
+ r.CheckNop(kRepBit, kRepWord64);
+ r.CheckNop(kRepBit | kTypeBool, kRepWord64);
}
@@ -226,51 +513,37 @@ TEST(TypeErrors) {
RepresentationChangerTester r;
// Floats cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(rFloat64, rBit);
- r.CheckTypeError(rFloat64, rBit | tBool);
- r.CheckTypeError(rBit, rFloat64);
- r.CheckTypeError(rBit | tBool, rFloat64);
+ r.CheckTypeError(kRepFloat64, kRepBit);
+ r.CheckTypeError(kRepFloat64, kRepBit | kTypeBool);
+ r.CheckTypeError(kRepBit, kRepFloat64);
+ r.CheckTypeError(kRepBit | kTypeBool, kRepFloat64);
+
+ // Floats cannot be implicitly converted to/from comparison conditions.
+ r.CheckTypeError(kRepFloat32, kRepBit);
+ r.CheckTypeError(kRepFloat32, kRepBit | kTypeBool);
+ r.CheckTypeError(kRepBit, kRepFloat32);
+ r.CheckTypeError(kRepBit | kTypeBool, kRepFloat32);
// Word64 is internal and shouldn't be implicitly converted.
- r.CheckTypeError(rWord64, rTagged | tBool);
- r.CheckTypeError(rWord64, rTagged);
- r.CheckTypeError(rWord64, rTagged | tBool);
- r.CheckTypeError(rTagged, rWord64);
- r.CheckTypeError(rTagged | tBool, rWord64);
+ r.CheckTypeError(kRepWord64, kRepTagged | kTypeBool);
+ r.CheckTypeError(kRepWord64, kRepTagged);
+ r.CheckTypeError(kRepWord64, kRepTagged | kTypeBool);
+ r.CheckTypeError(kRepTagged, kRepWord64);
+ r.CheckTypeError(kRepTagged | kTypeBool, kRepWord64);
// Word64 / Word32 shouldn't be implicitly converted.
- r.CheckTypeError(rWord64, rWord32);
- r.CheckTypeError(rWord32, rWord64);
- r.CheckTypeError(rWord64, rWord32 | tInt32);
- r.CheckTypeError(rWord32 | tInt32, rWord64);
- r.CheckTypeError(rWord64, rWord32 | tUint32);
- r.CheckTypeError(rWord32 | tUint32, rWord64);
-
- for (size_t i = 0; i < ARRAY_SIZE(all_reps); i++) {
- for (size_t j = 0; j < ARRAY_SIZE(all_reps); j++) {
+ r.CheckTypeError(kRepWord64, kRepWord32);
+ r.CheckTypeError(kRepWord32, kRepWord64);
+ r.CheckTypeError(kRepWord64, kRepWord32 | kTypeInt32);
+ r.CheckTypeError(kRepWord32 | kTypeInt32, kRepWord64);
+ r.CheckTypeError(kRepWord64, kRepWord32 | kTypeUint32);
+ r.CheckTypeError(kRepWord32 | kTypeUint32, kRepWord64);
+
+ for (size_t i = 0; i < arraysize(all_reps); i++) {
+ for (size_t j = 0; j < arraysize(all_reps); j++) {
if (i == j) continue;
// Only a single from representation is allowed.
- r.CheckTypeError(all_reps[i] | all_reps[j], rTagged);
+ r.CheckTypeError(all_reps[i] | all_reps[j], kRepTagged);
}
}
}
-
-
-TEST(CompleteMatrix) {
- // TODO(titzer): test all variants in the matrix.
- // rB
- // tBrB
- // tBrT
- // rW32
- // tIrW32
- // tUrW32
- // rW64
- // tIrW64
- // tUrW64
- // rF64
- // tIrF64
- // tUrF64
- // tArF64
- // rT
- // tArT
-}
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index af173d6be6..14c024cdbc 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -4,13 +4,29 @@
#include "src/v8.h"
+#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
+using namespace v8;
using namespace v8::internal;
using namespace v8::internal::compiler;
#if V8_TURBOFAN_TARGET
+static void IsOptimized(const FunctionCallbackInfo<v8::Value>& args) {
+ JavaScriptFrameIterator it(CcTest::i_isolate());
+ JavaScriptFrame* frame = it.frame();
+ return args.GetReturnValue().Set(frame->is_optimized());
+}
+
+
+static void InstallIsOptimizedHelper(v8::Isolate* isolate) {
+ Local<v8::Context> context = isolate->GetCurrentContext();
+ Local<v8::FunctionTemplate> t = FunctionTemplate::New(isolate, IsOptimized);
+ context->Global()->Set(v8_str("IsOptimized"), t->GetFunction());
+}
+
+
TEST(TurboSimpleDeopt) {
FLAG_allow_natives_syntax = true;
FLAG_turbo_deoptimization = true;
@@ -18,11 +34,12 @@ TEST(TurboSimpleDeopt) {
FunctionTester T(
"(function f(a) {"
"var b = 1;"
- "if (!%IsOptimized()) return 0;"
+ "if (!IsOptimized()) return 0;"
"%DeoptimizeFunction(f);"
- "if (%IsOptimized()) return 0;"
+ "if (IsOptimized()) return 0;"
"return a + b; })");
+ InstallIsOptimizedHelper(CcTest::isolate());
T.CheckCall(T.Val(2), T.Val(1));
}
@@ -35,11 +52,12 @@ TEST(TurboSimpleDeoptInExpr) {
"(function f(a) {"
"var b = 1;"
"var c = 2;"
- "if (!%IsOptimized()) return 0;"
+ "if (!IsOptimized()) return 0;"
"var d = b + (%DeoptimizeFunction(f), c);"
- "if (%IsOptimized()) return 0;"
+ "if (IsOptimized()) return 0;"
"return d + a; })");
+ InstallIsOptimizedHelper(CcTest::isolate());
T.CheckCall(T.Val(6), T.Val(3));
}
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
new file mode 100644
index 0000000000..ad82fecaa2
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -0,0 +1,353 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// Helper to determine inline count via JavaScriptFrame::GetInlineCount.
+// Note that a count of 1 indicates that no inlining has occurred.
+static void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ StackTraceFrameIterator it(CcTest::i_isolate());
+ int frames_seen = 0;
+ JavaScriptFrame* topmost = it.frame();
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ PrintF("%d %s, inline count: %d\n", frames_seen,
+ frame->function()->shared()->DebugName()->ToCString().get(),
+ frame->GetInlineCount());
+ frames_seen++;
+ it.Advance();
+ }
+ CHECK_EQ(args[0]->ToInt32()->Value(), topmost->GetInlineCount());
+}
+
+
+static void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::FunctionTemplate> t =
+ v8::FunctionTemplate::New(isolate, AssertInlineCount);
+ context->Global()->Set(v8_str("AssertInlineCount"), t->GetFunction());
+}
+
+
+TEST(SimpleInlining) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function(){"
+ "function foo(s) { AssertInlineCount(2); return s; };"
+ "function bar(s, t) { return foo(s); };"
+ "return bar;})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(1), T.Val(1), T.Val(2));
+}
+
+
+TEST(SimpleInliningDeopt) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function(){"
+ "function foo(s) { %DeoptimizeFunction(bar); return "
+ "s; };"
+ "function bar(s, t) { return foo(s); };"
+ "return bar;})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(1), T.Val(1), T.Val(2));
+}
+
+
+TEST(SimpleInliningContext) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "function foo(s) { AssertInlineCount(2); var x = 12; return s + x; };"
+ "function bar(s, t) { return foo(s); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(13), T.Val(1), T.Val(2));
+}
+
+
+TEST(SimpleInliningContextDeopt) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "function foo(s) { "
+ " AssertInlineCount(2); %DeoptimizeFunction(bar); var x = 12;"
+ " return s + x;"
+ "};"
+ "function bar(s, t) { return foo(s); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(13), T.Val(1), T.Val(2));
+}
+
+
+TEST(CaptureContext) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "var f = (function () {"
+ "var x = 42;"
+ "function bar(s) { return x + s; };"
+ "return (function (s) { return bar(s); });"
+ "})();"
+ "(function (s) { return f(s)})",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
+}
+
+
+// TODO(sigurds) For now we do not inline any native functions. If we do at
+// some point, change this test.
+TEST(DontInlineEval) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "var x = 42;"
+ "(function () {"
+ "function bar(s, t) { return eval(\"AssertInlineCount(1); x\") };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(42), T.Val("x"), T.undefined());
+}
+
+
+TEST(InlineOmitArguments) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 42;"
+ "function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
+ "return (function (s,t) { return bar(s); });"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
+}
+
+
+TEST(InlineOmitArgumentsDeopt) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "function foo(s,t,u,v) { AssertInlineCount(2); %DeoptimizeFunction(bar); "
+ "return baz(); };"
+ "function bar() { return foo(11); };"
+ "function baz() { return foo.arguments.length == 1 && "
+ " foo.arguments[0] == 11 ; }"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
+}
+
+
+TEST(InlineSurplusArguments) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 42;"
+ "function foo(s) { AssertInlineCount(2); return x + s; };"
+ "function bar(s,t) { return foo(s,t,13); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
+}
+
+
+TEST(InlineSurplusArgumentsDeopt) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "function foo(s) { AssertInlineCount(2); %DeoptimizeFunction(bar); "
+ "return baz(); };"
+ "function bar() { return foo(13, 14, 15); };"
+ "function baz() { return foo.arguments.length == 3 && "
+ " foo.arguments[0] == 13 && "
+ " foo.arguments[1] == 14 && "
+ " foo.arguments[2] == 15; }"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
+}
+
+
+TEST(InlineTwice) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 42;"
+ "function bar(s) { AssertInlineCount(2); return x + s; };"
+ "return (function (s,t) { return bar(s) + bar(t); });"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(2 * 42 + 12 + 4), T.Val(12), T.Val(4));
+}
+
+
+TEST(InlineTwiceDependent) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 42;"
+ "function foo(s) { AssertInlineCount(2); return x + s; };"
+ "function bar(s,t) { return foo(foo(s)); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(42 + 42 + 12), T.Val(12), T.Val(4));
+}
+
+
+TEST(InlineTwiceDependentDiamond) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 41;"
+ "function foo(s) { AssertInlineCount(2); if (s % 2 == 0) {"
+ " return x - s } else { return x + s; } };"
+ "function bar(s,t) { return foo(foo(s)); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(-11), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineTwiceDependentDiamondDifferent) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 41;"
+ "function foo(s,t) { AssertInlineCount(2); if (s % 2 == 0) {"
+ " return x - s * t } else { return x + s * t; } };"
+ "function bar(s,t) { return foo(foo(s, 3), 5); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(-329), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineLoop) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = 41;"
+ "function foo(s) { AssertInlineCount(2); while (s > 0) {"
+ " s = s - 1; }; return s; };"
+ "function bar(s,t) { return foo(foo(s)); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(0.0), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineStrictIntoNonStrict) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = Object.create({}, { y: { value:42, writable:false } });"
+ "function foo(s) { 'use strict';"
+ " x.y = 9; };"
+ "function bar(s,t) { return foo(s); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckThrows(T.undefined(), T.undefined());
+}
+
+
+TEST(InlineNonStrictIntoStrict) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ "var x = Object.create({}, { y: { value:42, writable:false } });"
+ "function foo(s) { x.y = 9; return x.y; };"
+ "function bar(s,t) { \'use strict\'; return foo(s); };"
+ "return bar;"
+ "})();",
+ CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(42), T.undefined(), T.undefined());
+}
+
+
+#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
index 2eb4fa6d0f..df2fcdcb6d 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
@@ -148,6 +148,26 @@ TEST(ForInStatement) {
}
+TEST(ForInContinueStatement) {
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " for (var x in a) {"
+ " r += 'A-';"
+ " if (b) continue;"
+ " r += 'B-';"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val("-A-B-"), T.NewObject("({x:1})"), T.false_value());
+ T.CheckCall(T.Val("-A-B-A-B-"), T.NewObject("({x:1,y:2})"), T.false_value());
+ T.CheckCall(T.Val("-A-"), T.NewObject("({x:1})"), T.true_value());
+ T.CheckCall(T.Val("-A-A-"), T.NewObject("({x:1,y:2})"), T.true_value());
+}
+
+
TEST(SwitchStatement) {
const char* src =
"(function(a,b) {"
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 2ad7e50467..dec7194c4a 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -233,3 +233,57 @@ TEST(ReceiverPatching) {
Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
T.CheckCall(g, T.undefined());
}
+
+
+TEST(CallEval) {
+ FunctionTester T(
+ "var x = 42;"
+ "(function () {"
+ "function bar() { return eval('x') };"
+ "return bar;"
+ "})();");
+
+ T.CheckCall(T.Val(42), T.Val("x"), T.undefined());
+}
+
+
+TEST(ContextLoadedFromActivation) {
+ const char* script =
+ "var x = 42;"
+ "(function() {"
+ " return function () { return x };"
+ "})()";
+
+ // Disable context specialization.
+ FunctionTester T(script);
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope scope(context);
+ v8::Local<v8::Value> value = CompileRun(script);
+ i::Handle<i::Object> ofun = v8::Utils::OpenHandle(*value);
+ i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
+ jsfun->set_code(T.function->code());
+ context->Global()->Set(v8_str("foo"), v8::Utils::ToLocal(jsfun));
+ CompileRun("var x = 24;");
+ ExpectInt32("foo();", 24);
+}
+
+
+TEST(BuiltinLoadedFromActivation) {
+ const char* script =
+ "var x = 42;"
+ "(function() {"
+ " return function () { return this; };"
+ "})()";
+
+ // Disable context specialization.
+ FunctionTester T(script);
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope scope(context);
+ v8::Local<v8::Value> value = CompileRun(script);
+ i::Handle<i::Object> ofun = v8::Utils::OpenHandle(*value);
+ i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
+ jsfun->set_code(T.function->code());
+ context->Global()->Set(v8_str("foo"), v8::Utils::ToLocal(jsfun));
+ CompileRun("var x = 24;");
+ ExpectObject("foo()", context->Global());
+}
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 6786f38741..5606126e06 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -5,12 +5,19 @@
#include <functional>
#include <limits>
+#include "src/base/bits.h"
+#include "src/compiler/generic-node-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
#if V8_TURBOFAN_TARGET
+using namespace v8::base;
+
+#define CHECK_UINT32_EQ(x, y) \
+ CHECK_EQ(static_cast<int32_t>(x), static_cast<int32_t>(y))
+
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -41,7 +48,7 @@ static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
case 6:
return m->Int32Constant(0x01234567);
case 7:
- return m->Load(kMachineWord32, m->PointerConstant(NULL));
+ return m->Load(kMachInt32, m->PointerConstant(NULL));
default:
return NULL;
}
@@ -51,7 +58,7 @@ static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
TEST(CodeGenInt32Binop) {
RawMachineAssemblerTester<void> m;
- Operator* ops[] = {
+ const Operator* ops[] = {
m.machine()->Word32And(), m.machine()->Word32Or(),
m.machine()->Word32Xor(), m.machine()->Word32Shl(),
m.machine()->Word32Shr(), m.machine()->Word32Sar(),
@@ -66,7 +73,7 @@ TEST(CodeGenInt32Binop) {
for (int i = 0; ops[i] != NULL; i++) {
for (int j = 0; j < 8; j++) {
for (int k = 0; k < 8; k++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* a = Int32Input(&m, j);
Node* b = Int32Input(&m, k);
m.Return(m.NewNode(ops[i], a, b));
@@ -95,7 +102,7 @@ TEST(RunGotoMultiple) {
int constant = 9999977;
MLabel labels[10];
- for (size_t i = 0; i < ARRAY_SIZE(labels); i++) {
+ for (size_t i = 0; i < arraysize(labels); i++) {
m.Goto(&labels[i]);
m.Bind(&labels[i]);
}
@@ -202,7 +209,8 @@ TEST(RunLoop) {
template <typename R>
static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
- Node* true_node, Node* false_node) {
+ MachineType type, Node* true_node,
+ Node* false_node) {
MLabel blocka, blockb;
MLabel* end = m->Exit();
m->Branch(cond_node, &blocka, &blockb);
@@ -212,51 +220,51 @@ static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
m->Goto(end);
m->Bind(end);
- Node* phi = m->Phi(true_node, false_node);
+ Node* phi = m->Phi(type, true_node, false_node);
m->Return(phi);
}
TEST(RunDiamondPhiConst) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
int false_val = 0xFF666;
int true_val = 0x00DDD;
Node* true_node = m.Int32Constant(true_val);
Node* false_node = m.Int32Constant(false_val);
- BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+ BuildDiamondPhi(&m, m.Parameter(0), kMachInt32, true_node, false_node);
CHECK_EQ(false_val, m.Call(0));
CHECK_EQ(true_val, m.Call(1));
}
TEST(RunDiamondPhiNumber) {
- RawMachineAssemblerTester<Object*> m(kMachineWord32);
+ RawMachineAssemblerTester<Object*> m(kMachInt32);
double false_val = -11.1;
double true_val = 200.1;
Node* true_node = m.NumberConstant(true_val);
Node* false_node = m.NumberConstant(false_val);
- BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+ BuildDiamondPhi(&m, m.Parameter(0), kMachAnyTagged, true_node, false_node);
m.CheckNumber(false_val, m.Call(0));
m.CheckNumber(true_val, m.Call(1));
}
TEST(RunDiamondPhiString) {
- RawMachineAssemblerTester<Object*> m(kMachineWord32);
+ RawMachineAssemblerTester<Object*> m(kMachInt32);
const char* false_val = "false";
const char* true_val = "true";
Node* true_node = m.StringConstant(true_val);
Node* false_node = m.StringConstant(false_val);
- BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+ BuildDiamondPhi(&m, m.Parameter(0), kMachAnyTagged, true_node, false_node);
m.CheckString(false_val, m.Call(0));
m.CheckString(true_val, m.Call(1));
}
TEST(RunDiamondPhiParam) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
- BuildDiamondPhi(&m, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ BuildDiamondPhi(&m, m.Parameter(0), kMachInt32, m.Parameter(1),
+ m.Parameter(2));
int32_t c1 = 0x260cb75a;
int32_t c2 = 0xcd3e9c8b;
int result = m.Call(0, c1, c2);
@@ -281,7 +289,7 @@ TEST(RunLoopPhiConst) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(false_node, true_node);
+ Node* phi = m.Phi(kMachInt32, false_node, true_node);
m.Branch(cond_node, &body, end);
m.Bind(&body);
m.Goto(&header);
@@ -293,8 +301,7 @@ TEST(RunLoopPhiConst) {
TEST(RunLoopPhiParam) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
MLabel blocka, blockb;
MLabel* end = m.Exit();
@@ -302,8 +309,8 @@ TEST(RunLoopPhiParam) {
m.Goto(&blocka);
m.Bind(&blocka);
- Node* phi = m.Phi(m.Parameter(1), m.Parameter(2));
- Node* cond = m.Phi(m.Parameter(0), m.Int32Constant(0));
+ Node* phi = m.Phi(kMachInt32, m.Parameter(1), m.Parameter(2));
+ Node* cond = m.Phi(kMachInt32, m.Parameter(0), m.Int32Constant(0));
m.Branch(cond, &blockb, end);
m.Bind(&blockb);
@@ -334,7 +341,7 @@ TEST(RunLoopPhiInduction) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(false_node, false_node);
+ Node* phi = m.Phi(kMachInt32, false_node, false_node);
m.Branch(m.Int32Constant(0), &body, end);
m.Bind(&body);
@@ -361,7 +368,7 @@ TEST(RunLoopIncrement) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(zero, zero);
+ Node* phi = m.Phi(kMachInt32, zero, zero);
m.Branch(m.WordXor(phi, bt.param0), &body, end);
m.Bind(&body);
@@ -389,7 +396,7 @@ TEST(RunLoopIncrement2) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(zero, zero);
+ Node* phi = m.Phi(kMachInt32, zero, zero);
m.Branch(m.Int32LessThan(phi, bt.param0), &body, end);
m.Bind(&body);
@@ -418,7 +425,7 @@ TEST(RunLoopIncrement3) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(zero, zero);
+ Node* phi = m.Phi(kMachInt32, zero, zero);
m.Branch(m.Uint32LessThan(phi, bt.param0), &body, end);
m.Bind(&body);
@@ -446,7 +453,7 @@ TEST(RunLoopDecrement) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(bt.param0, m.Int32Constant(0));
+ Node* phi = m.Phi(kMachInt32, bt.param0, m.Int32Constant(0));
m.Branch(phi, &body, end);
m.Bind(&body);
@@ -474,7 +481,7 @@ TEST(RunLoopIncrementFloat64) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(minus_3, ten);
+ Node* phi = m.Phi(kMachFloat64, minus_3, ten);
m.Branch(m.Float64LessThan(phi, ten), &body, end);
m.Bind(&body);
@@ -492,7 +499,7 @@ TEST(RunLoadInt32) {
RawMachineAssemblerTester<int32_t> m;
int32_t p1 = 0; // loads directly from this location.
- m.Return(m.LoadFromPointer(&p1, kMachineWord32));
+ m.Return(m.LoadFromPointer(&p1, kMachInt32));
FOR_INT32_INPUTS(i) {
p1 = *i;
@@ -507,12 +514,12 @@ TEST(RunLoadInt32Offset) {
int32_t offsets[] = {-2000000, -100, -101, 1, 3,
7, 120, 2000, 2000000000, 0xff};
- for (size_t i = 0; i < ARRAY_SIZE(offsets); i++) {
+ for (size_t i = 0; i < arraysize(offsets); i++) {
RawMachineAssemblerTester<int32_t> m;
int32_t offset = offsets[i];
byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
// generate load [#base + #index]
- m.Return(m.LoadFromPointer(pointer, kMachineWord32, offset));
+ m.Return(m.LoadFromPointer(pointer, kMachInt32, offset));
FOR_INT32_INPUTS(j) {
p1 = *j;
@@ -533,10 +540,9 @@ TEST(RunLoadStoreFloat64Offset) {
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
- Node* load = m.Load(kMachineFloat64, m.PointerConstant(from),
- m.Int32Constant(offset));
- m.Store(kMachineFloat64, m.PointerConstant(to), m.Int32Constant(offset),
- load);
+ Node* load =
+ m.Load(kMachFloat64, m.PointerConstant(from), m.Int32Constant(offset));
+ m.Store(kMachFloat64, m.PointerConstant(to), m.Int32Constant(offset), load);
m.Return(m.Int32Constant(magic));
FOR_FLOAT64_INPUTS(j) {
@@ -567,14 +573,12 @@ TEST(RunInt32AddP) {
TEST(RunInt32AddAndWord32SarP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Sar(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
int32_t expected = *i + (*j >> shift);
CHECK_EQ(expected, m.Call(*i, *j, shift));
@@ -583,14 +587,12 @@ TEST(RunInt32AddAndWord32SarP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
m.Return(m.Int32Add(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
// Use uint32_t because signed overflow is UB in C.
int32_t expected = (*i >> shift) + *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
@@ -603,14 +605,12 @@ TEST(RunInt32AddAndWord32SarP) {
TEST(RunInt32AddAndWord32ShlP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
int32_t expected = *i + (*j << shift);
CHECK_EQ(expected, m.Call(*i, *j, shift));
@@ -619,14 +619,12 @@ TEST(RunInt32AddAndWord32ShlP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
m.Return(m.Int32Add(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
// Use uint32_t because signed overflow is UB in C.
int32_t expected = (*i << shift) + *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
@@ -639,14 +637,12 @@ TEST(RunInt32AddAndWord32ShlP) {
TEST(RunInt32AddAndWord32ShrP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Shr(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
int32_t expected = *i + (*j >> shift);
CHECK_EQ(expected, m.Call(*i, *j, shift));
@@ -655,14 +651,12 @@ TEST(RunInt32AddAndWord32ShrP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
m.Return(m.Int32Add(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
// Use uint32_t because signed overflow is UB in C.
int32_t expected = (*i >> shift) + *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
@@ -677,7 +671,7 @@ TEST(RunInt32AddInBranch) {
static const int32_t constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -695,7 +689,7 @@ TEST(RunInt32AddInBranch) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -713,7 +707,7 @@ TEST(RunInt32AddInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -723,14 +717,14 @@ TEST(RunInt32AddInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -740,18 +734,19 @@ TEST(RunInt32AddInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1),
@@ -764,8 +759,7 @@ TEST(RunInt32AddInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -793,65 +787,65 @@ TEST(RunInt32AddInBranch) {
TEST(RunInt32AddInComparison) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i + *j) == 0;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Constant(0), m.Int32Add(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i + *j) == 0;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i + *j) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*j + *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*j + *i) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
m.Return(m.Word32Equal(
m.Int32Add(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -878,15 +872,14 @@ TEST(RunInt32AddInComparison) {
TEST(RunInt32SubP) {
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
m.Return(m.Int32Sub(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- // Use uint32_t because signed overflow is UB in C.
- int expected = static_cast<int32_t>(*i - *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = static_cast<int32_t>(*i - *j);
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -895,23 +888,21 @@ TEST(RunInt32SubP) {
TEST(RunInt32SubImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
- // Use uint32_t because signed overflow is UB in C.
- int32_t expected = static_cast<int32_t>(*i - *j);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = *i - *j;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
FOR_UINT32_INPUTS(j) {
- // Use uint32_t because signed overflow is UB in C.
- int32_t expected = static_cast<int32_t>(*j - *i);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = *j - *i;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -920,15 +911,12 @@ TEST(RunInt32SubImm) {
TEST(RunInt32SubAndWord32SarP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Sar(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
- // Use uint32_t because signed overflow is UB in C.
+ FOR_UINT32_SHIFTS(shift) {
int32_t expected = *i - (*j >> shift);
CHECK_EQ(expected, m.Call(*i, *j, shift));
}
@@ -936,15 +924,12 @@ TEST(RunInt32SubAndWord32SarP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
m.Return(m.Int32Sub(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
- // Use uint32_t because signed overflow is UB in C.
int32_t expected = (*i >> shift) - *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
}
@@ -956,15 +941,12 @@ TEST(RunInt32SubAndWord32SarP) {
TEST(RunInt32SubAndWord32ShlP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
- // Use uint32_t because signed overflow is UB in C.
+ FOR_UINT32_SHIFTS(shift) {
int32_t expected = *i - (*j << shift);
CHECK_EQ(expected, m.Call(*i, *j, shift));
}
@@ -972,14 +954,12 @@ TEST(RunInt32SubAndWord32ShlP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
m.Return(m.Int32Sub(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
// Use uint32_t because signed overflow is UB in C.
int32_t expected = (*i << shift) - *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
@@ -992,30 +972,28 @@ TEST(RunInt32SubAndWord32ShlP) {
TEST(RunInt32SubAndWord32ShrP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32, kMachUint32,
+ kMachUint32);
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Shr(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
int32_t expected = *i - (*j >> shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ CHECK_UINT32_EQ(expected, m.Call(*i, *j, shift));
}
}
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32, kMachUint32,
+ kMachUint32);
m.Return(m.Int32Sub(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
// Use uint32_t because signed overflow is UB in C.
int32_t expected = (*i >> shift) - *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
@@ -1030,7 +1008,7 @@ TEST(RunInt32SubInBranch) {
static const int constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1048,7 +1026,7 @@ TEST(RunInt32SubInBranch) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1066,7 +1044,7 @@ TEST(RunInt32SubInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -1083,7 +1061,7 @@ TEST(RunInt32SubInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -1100,11 +1078,12 @@ TEST(RunInt32SubInBranch) {
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1),
@@ -1117,8 +1096,7 @@ TEST(RunInt32SubInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -1146,65 +1124,65 @@ TEST(RunInt32SubInBranch) {
TEST(RunInt32SubInComparison) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i - *j) == 0;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Constant(0), m.Int32Sub(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i - *j) == 0;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i - *j) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*j - *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*j - *i) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
m.Return(m.Word32Equal(
m.Int32Sub(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -1243,12 +1221,12 @@ TEST(RunInt32MulP) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int expected = static_cast<int32_t>(*i * *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = *i * *j;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1258,21 +1236,21 @@ TEST(RunInt32MulP) {
TEST(RunInt32MulImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(*i * *j);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = *i * *j;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(*j * *i);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = *j * *i;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -1281,8 +1259,7 @@ TEST(RunInt32MulImm) {
TEST(RunInt32MulAndInt32AddP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
m.Return(
m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
FOR_INT32_INPUTS(i) {
@@ -1298,8 +1275,7 @@ TEST(RunInt32MulAndInt32AddP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
m.Return(
m.Int32Add(m.Int32Mul(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1335,8 +1311,7 @@ TEST(RunInt32MulAndInt32AddP) {
TEST(RunInt32MulAndInt32SubP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachInt32);
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1548,25 +1523,25 @@ TEST(RunWord32AndP) {
TEST(RunWord32AndAndWord32ShlP) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i << (*j & 0x1f);
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i << (0x1f & *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1576,25 +1551,25 @@ TEST(RunWord32AndAndWord32ShlP) {
TEST(RunWord32AndAndWord32ShrP) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i >> (*j & 0x1f);
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i >> (0x1f & *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1608,8 +1583,8 @@ TEST(RunWord32AndAndWord32SarP) {
bt.AddReturn(
m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i >> (*j & 0x1f);
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = *i >> (*j & 0x1f);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -1620,7 +1595,7 @@ TEST(RunWord32AndAndWord32SarP) {
bt.AddReturn(
m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(j) {
uint32_t expected = *i >> (0x1f & *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
@@ -1632,21 +1607,21 @@ TEST(RunWord32AndAndWord32SarP) {
TEST(RunWord32AndImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & *j;
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & ~(*j);
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -1657,7 +1632,7 @@ TEST(RunWord32AndInBranch) {
static const int constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1675,7 +1650,7 @@ TEST(RunWord32AndInBranch) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1693,7 +1668,7 @@ TEST(RunWord32AndInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -1710,7 +1685,7 @@ TEST(RunWord32AndInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
@@ -1728,11 +1703,12 @@ TEST(RunWord32AndInBranch) {
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1),
@@ -1745,8 +1721,7 @@ TEST(RunWord32AndInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -1774,47 +1749,47 @@ TEST(RunWord32AndInBranch) {
TEST(RunWord32AndInComparison) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i & *j) == 0;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Constant(0), m.Word32And(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i & *j) == 0;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i & *j) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*j & *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*j & *i) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -1824,34 +1799,34 @@ TEST(RunWord32AndInComparison) {
TEST(RunWord32OrP) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Word32Or(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Word32Or(bt.param0, m.Word32Not(bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Word32Or(m.Word32Not(bt.param0), bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = ~(*i) | *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1861,21 +1836,21 @@ TEST(RunWord32OrP) {
TEST(RunWord32OrImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | *j;
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -1895,8 +1870,8 @@ TEST(RunWord32OrInBranch) {
bt.AddReturn(m.Int32Constant(constant));
m.Bind(&blockb);
bt.AddReturn(m.Int32Constant(0 - constant));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
@@ -1913,16 +1888,16 @@ TEST(RunWord32OrInBranch) {
bt.AddReturn(m.Int32Constant(constant));
m.Bind(&blockb);
bt.AddReturn(m.Int32Constant(0 - constant));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
{
- FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ FOR_INT32_INPUTS(i) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -1931,15 +1906,15 @@ TEST(RunWord32OrInBranch) {
m.Return(m.Int32Constant(constant));
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(j) {
int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
CHECK_EQ(expected, m.Call(*j));
}
}
}
{
- FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ FOR_INT32_INPUTS(i) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
MLabel blocka, blockb;
m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -1948,7 +1923,7 @@ TEST(RunWord32OrInBranch) {
m.Return(m.Int32Constant(constant));
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(j) {
int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
CHECK_EQ(expected, m.Call(*j));
}
@@ -1956,11 +1931,12 @@ TEST(RunWord32OrInBranch) {
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1),
@@ -1973,8 +1949,7 @@ TEST(RunWord32OrInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -2002,7 +1977,7 @@ TEST(RunWord32OrInBranch) {
TEST(RunWord32OrInComparison) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
@@ -2014,7 +1989,7 @@ TEST(RunWord32OrInComparison) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
@@ -2026,23 +2001,23 @@ TEST(RunWord32OrInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i | *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i | *j) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*j | *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*j | *i) == 0;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -2052,22 +2027,22 @@ TEST(RunWord32OrInComparison) {
TEST(RunWord32XorP) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ *j;
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Word32Xor(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i ^ *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = *i ^ *j;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -2075,9 +2050,9 @@ TEST(RunWord32XorP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(m.Word32Xor(bt.param0, m.Word32Not(bt.param1)));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i ^ ~(*j);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = *i ^ ~(*j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2086,20 +2061,20 @@ TEST(RunWord32XorP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(m.Word32Xor(m.Word32Not(bt.param0), bt.param1));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = ~(*i) ^ *j;
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = ~(*i) ^ *j;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ ~(*j);
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
@@ -2107,10 +2082,10 @@ TEST(RunWord32XorP) {
TEST(RunWord32XorInBranch) {
- static const int constant = 987654321;
+ static const uint32_t constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -2121,14 +2096,14 @@ TEST(RunWord32XorInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -2139,14 +2114,14 @@ TEST(RunWord32XorInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+ CHECK_UINT32_EQ(expected, bt.call(*i, *j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -2156,14 +2131,14 @@ TEST(RunWord32XorInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
@@ -2174,18 +2149,19 @@ TEST(RunWord32XorInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<void> m;
- Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr()};
- for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ const Operator* shops[] = {m.machine()->Word32Sar(),
+ m.machine()->Word32Shl(),
+ m.machine()->Word32Shr()};
+ for (size_t n = 0; n < arraysize(shops); n++) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+ kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
m.NewNode(shops[n], m.Parameter(1),
@@ -2198,8 +2174,7 @@ TEST(RunWord32XorInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t right;
switch (shops[n]->opcode()) {
default:
@@ -2226,52 +2201,100 @@ TEST(RunWord32XorInBranch) {
TEST(RunWord32ShlP) {
{
- FOR_UINT32_INPUTS(i) {
- uint32_t shift = *i & 0x1F;
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j << shift;
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Word32Shl(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t shift = *j & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
uint32_t expected = *i << shift;
- CHECK_EQ(expected, bt.call(*i, shift));
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
}
}
}
}
-TEST(RunWord32ShrP) {
+TEST(RunWord32ShlInComparison) {
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Word32Shl(bt.param0, bt.param1), m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = 0 == (*i << shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
{
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Int32Constant(0), m.Word32Shl(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
- uint32_t shift = *i & 0x1F;
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = 0 == (*i << shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(
+ m.Word32Equal(m.Int32Constant(0),
+ m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))));
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = 0 == (*i << shift);
+ CHECK_UINT32_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(
+ m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = 0 == (*i << shift);
+ CHECK_UINT32_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+}
+
+
+TEST(RunWord32ShrP) {
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j >> shift;
- CHECK_EQ(expected, m.Call(*j));
+ CHECK_UINT32_EQ(expected, m.Call(*j));
}
}
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Word32Shr(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t shift = *j & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
uint32_t expected = *i >> shift;
- CHECK_EQ(expected, bt.call(*i, shift));
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
}
}
CHECK_EQ(0x00010000, bt.call(0x80000000, 15));
@@ -2279,11 +2302,62 @@ TEST(RunWord32ShrP) {
}
+TEST(RunWord32ShrInComparison) {
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Word32Shr(bt.param0, bt.param1), m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = 0 == (*i >> shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Int32Constant(0), m.Word32Shr(bt.param0, bt.param1)));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = 0 == (*i >> shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(
+ m.Word32Equal(m.Int32Constant(0),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = 0 == (*i >> shift);
+ CHECK_UINT32_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(
+ m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = 0 == (*i >> shift);
+ CHECK_UINT32_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+}
+
+
TEST(RunWord32SarP) {
{
- FOR_INT32_INPUTS(i) {
- int32_t shift = *i & 0x1F;
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ FOR_INT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.Return(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)));
FOR_INT32_INPUTS(j) {
int32_t expected = *j >> shift;
@@ -2296,8 +2370,7 @@ TEST(RunWord32SarP) {
Int32BinopTester bt(&m);
bt.AddReturn(m.Word32Sar(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t shift = *j & 0x1F;
+ FOR_INT32_SHIFTS(shift) {
int32_t expected = *i >> shift;
CHECK_EQ(expected, bt.call(*i, shift));
}
@@ -2307,10 +2380,139 @@ TEST(RunWord32SarP) {
}
+TEST(RunWord32SarInComparison) {
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Word32Sar(bt.param0, bt.param1), m.Int32Constant(0)));
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_SHIFTS(shift) {
+ int32_t expected = 0 == (*i >> shift);
+ CHECK_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Int32Constant(0), m.Word32Sar(bt.param0, bt.param1)));
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_SHIFTS(shift) {
+ int32_t expected = 0 == (*i >> shift);
+ CHECK_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ FOR_INT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ m.Return(
+ m.Word32Equal(m.Int32Constant(0),
+ m.Word32Sar(m.Parameter(0), m.Int32Constant(shift))));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = 0 == (*i >> shift);
+ CHECK_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+ {
+ FOR_INT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ m.Return(
+ m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = 0 == (*i >> shift);
+ CHECK_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+}
+
+
+TEST(RunWord32RorP) {
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)));
+ FOR_UINT32_INPUTS(j) {
+ int32_t expected = bits::RotateRight32(*j, shift);
+ CHECK_EQ(expected, m.Call(*j));
+ }
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(m.Word32Ror(bt.param0, bt.param1));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = bits::RotateRight32(*i, shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+}
+
+
+TEST(RunWord32RorInComparison) {
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Word32Ror(bt.param0, bt.param1), m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m;
+ Uint32BinopTester bt(&m);
+ bt.AddReturn(
+ m.Word32Equal(m.Int32Constant(0), m.Word32Ror(bt.param0, bt.param1)));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_SHIFTS(shift) {
+ uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ }
+ }
+ }
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(
+ m.Word32Equal(m.Int32Constant(0),
+ m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+ CHECK_UINT32_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+ {
+ FOR_UINT32_SHIFTS(shift) {
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.Return(
+ m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+ CHECK_UINT32_EQ(expected, m.Call(*i));
+ }
+ }
+ }
+}
+
+
TEST(RunWord32NotP) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.Return(m.Word32Not(m.Parameter(0)));
- FOR_UINT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i) {
int expected = ~(*i);
CHECK_EQ(expected, m.Call(*i));
}
@@ -2318,7 +2520,7 @@ TEST(RunWord32NotP) {
TEST(RunInt32NegP) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.Return(m.Int32Neg(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
int expected = -*i;
@@ -2329,14 +2531,12 @@ TEST(RunInt32NegP) {
TEST(RunWord32EqualAndWord32SarP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachUint32);
m.Return(m.Word32Equal(m.Parameter(0),
m.Word32Sar(m.Parameter(1), m.Parameter(2))));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t expected = (*i == (*j >> shift));
CHECK_EQ(expected, m.Call(*i, *j, shift));
}
@@ -2344,14 +2544,12 @@ TEST(RunWord32EqualAndWord32SarP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachInt32);
m.Return(m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_INT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
int32_t expected = ((*i >> shift) == *k);
CHECK_EQ(expected, m.Call(*i, shift, *k));
}
@@ -2363,14 +2561,12 @@ TEST(RunWord32EqualAndWord32SarP) {
TEST(RunWord32EqualAndWord32ShlP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
m.Return(m.Word32Equal(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t expected = (*i == (*j << shift));
CHECK_EQ(expected, m.Call(*i, *j, shift));
}
@@ -2378,14 +2574,12 @@ TEST(RunWord32EqualAndWord32ShlP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
m.Return(m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
int32_t expected = ((*i << shift) == *k);
CHECK_EQ(expected, m.Call(*i, shift, *k));
}
@@ -2397,14 +2591,12 @@ TEST(RunWord32EqualAndWord32ShlP) {
TEST(RunWord32EqualAndWord32ShrP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
m.Return(m.Word32Equal(m.Parameter(0),
m.Word32Shr(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- FOR_UINT32_INPUTS(k) {
- uint32_t shift = *k & 0x1F;
+ FOR_UINT32_SHIFTS(shift) {
int32_t expected = (*i == (*j >> shift));
CHECK_EQ(expected, m.Call(*i, *j, shift));
}
@@ -2412,14 +2604,12 @@ TEST(RunWord32EqualAndWord32ShrP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
- kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
m.Return(m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- uint32_t shift = *j & 0x1F;
int32_t expected = ((*i >> shift) == *k);
CHECK_EQ(expected, m.Call(*i, shift, *k));
}
@@ -2431,8 +2621,7 @@ TEST(RunWord32EqualAndWord32ShrP) {
TEST(RunDeadNodes) {
for (int i = 0; true; i++) {
- RawMachineAssemblerTester<int32_t> m(i == 5 ? kMachineWord32
- : kMachineLast);
+ RawMachineAssemblerTester<int32_t> m(i == 5 ? kMachInt32 : kMachNone);
int constant = 0x55 + i;
switch (i) {
case 0:
@@ -2448,7 +2637,7 @@ TEST(RunDeadNodes) {
m.PointerConstant(&constant);
break;
case 4:
- m.LoadFromPointer(&constant, kMachineWord32);
+ m.LoadFromPointer(&constant, kMachInt32);
break;
case 5:
m.Parameter(0);
@@ -2469,20 +2658,20 @@ TEST(RunDeadNodes) {
TEST(RunDeadInt32Binops) {
RawMachineAssemblerTester<int32_t> m;
- Operator* ops[] = {
- m.machine()->Word32And(), m.machine()->Word32Or(),
- m.machine()->Word32Xor(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr(), m.machine()->Word32Sar(),
- m.machine()->Word32Equal(), m.machine()->Int32Add(),
- m.machine()->Int32Sub(), m.machine()->Int32Mul(),
- m.machine()->Int32Div(), m.machine()->Int32UDiv(),
- m.machine()->Int32Mod(), m.machine()->Int32UMod(),
- m.machine()->Int32LessThan(), m.machine()->Int32LessThanOrEqual(),
- m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual(),
- NULL};
+ const Operator* ops[] = {
+ m.machine()->Word32And(), m.machine()->Word32Or(),
+ m.machine()->Word32Xor(), m.machine()->Word32Shl(),
+ m.machine()->Word32Shr(), m.machine()->Word32Sar(),
+ m.machine()->Word32Ror(), m.machine()->Word32Equal(),
+ m.machine()->Int32Add(), m.machine()->Int32Sub(),
+ m.machine()->Int32Mul(), m.machine()->Int32Div(),
+ m.machine()->Int32UDiv(), m.machine()->Int32Mod(),
+ m.machine()->Int32UMod(), m.machine()->Int32LessThan(),
+ m.machine()->Int32LessThanOrEqual(), m.machine()->Uint32LessThan(),
+ m.machine()->Uint32LessThanOrEqual(), NULL};
for (int i = 0; ops[i] != NULL; i++) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
int constant = 0x55555 + i;
m.NewNode(ops[i], m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(constant));
@@ -2492,10 +2681,10 @@ TEST(RunDeadInt32Binops) {
}
-template <typename Type, typename CType>
+template <typename Type>
static void RunLoadImmIndex(MachineType rep) {
const int kNumElems = 3;
- CType buffer[kNumElems];
+ Type buffer[kNumElems];
// initialize the buffer with raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
@@ -2512,21 +2701,24 @@ static void RunLoadImmIndex(MachineType rep) {
m.Return(m.Load(rep, base, index));
Type expected = buffer[i];
- Type actual = static_cast<CType>(m.Call());
- CHECK_EQ(expected, actual);
- printf("XXX\n");
+ Type actual = m.Call();
+ CHECK(expected == actual);
}
}
}
TEST(RunLoadImmIndex) {
- RunLoadImmIndex<int8_t, uint8_t>(kMachineWord8);
- RunLoadImmIndex<int16_t, uint16_t>(kMachineWord16);
- RunLoadImmIndex<int32_t, uint32_t>(kMachineWord32);
- RunLoadImmIndex<int32_t*, int32_t*>(kMachineTagged);
-
- // TODO(titzer): test kMachineFloat64 loads
+ RunLoadImmIndex<int8_t>(kMachInt8);
+ RunLoadImmIndex<uint8_t>(kMachUint8);
+ RunLoadImmIndex<int16_t>(kMachInt16);
+ RunLoadImmIndex<uint16_t>(kMachUint16);
+ RunLoadImmIndex<int32_t>(kMachInt32);
+ RunLoadImmIndex<uint32_t>(kMachUint32);
+ RunLoadImmIndex<int32_t*>(kMachAnyTagged);
+
+ // TODO(titzer): test kRepBit loads
+ // TODO(titzer): test kMachFloat64 loads
// TODO(titzer): test various indexing modes.
}
@@ -2553,19 +2745,23 @@ static void RunLoadStore(MachineType rep) {
m.Store(rep, base, index1, load);
m.Return(m.Int32Constant(OK));
- CHECK_NE(buffer[x], buffer[y]);
+ CHECK(buffer[x] != buffer[y]);
CHECK_EQ(OK, m.Call());
- CHECK_EQ(buffer[x], buffer[y]);
+ CHECK(buffer[x] == buffer[y]);
}
}
TEST(RunLoadStore) {
- RunLoadStore<int8_t>(kMachineWord8);
- RunLoadStore<int16_t>(kMachineWord16);
- RunLoadStore<int32_t>(kMachineWord32);
- RunLoadStore<void*>(kMachineTagged);
- RunLoadStore<double>(kMachineFloat64);
+ RunLoadStore<int8_t>(kMachInt8);
+ RunLoadStore<uint8_t>(kMachUint8);
+ RunLoadStore<int16_t>(kMachInt16);
+ RunLoadStore<uint16_t>(kMachUint16);
+ RunLoadStore<int32_t>(kMachInt32);
+ RunLoadStore<uint32_t>(kMachUint32);
+ RunLoadStore<void*>(kMachAnyTagged);
+ RunLoadStore<float>(kMachFloat32);
+ RunLoadStore<double>(kMachFloat64);
}
@@ -2573,12 +2769,12 @@ TEST(RunFloat64Binop) {
RawMachineAssemblerTester<int32_t> m;
double result;
- Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
- m.machine()->Float64Mul(), m.machine()->Float64Div(),
- m.machine()->Float64Mod(), NULL};
+ const Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+ m.machine()->Float64Mul(), m.machine()->Float64Div(),
+ m.machine()->Float64Mod(), NULL};
double inf = V8_INFINITY;
- Operator* inputs[] = {
+ const Operator* inputs[] = {
m.common()->Float64Constant(0), m.common()->Float64Constant(1),
m.common()->Float64Constant(1), m.common()->Float64Constant(0),
m.common()->Float64Constant(0), m.common()->Float64Constant(-1),
@@ -2597,7 +2793,7 @@ TEST(RunFloat64Binop) {
Node* binop = m.NewNode(ops[i], a, b);
Node* base = m.PointerConstant(&result);
Node* zero = m.Int32Constant(0);
- m.Store(kMachineFloat64, base, zero, binop);
+ m.Store(kMachFloat64, base, zero, binop);
m.Return(m.Int32Constant(i + j));
CHECK_EQ(i + j, m.Call());
}
@@ -2608,9 +2804,9 @@ TEST(RunFloat64Binop) {
TEST(RunDeadFloat64Binops) {
RawMachineAssemblerTester<int32_t> m;
- Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
- m.machine()->Float64Mul(), m.machine()->Float64Div(),
- m.machine()->Float64Mod(), NULL};
+ const Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+ m.machine()->Float64Mul(), m.machine()->Float64Div(),
+ m.machine()->Float64Mod(), NULL};
for (int i = 0; ops[i] != NULL; i++) {
RawMachineAssemblerTester<int32_t> m;
@@ -2658,9 +2854,9 @@ TEST(RunFloat64SubImm1) {
FOR_FLOAT64_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
- Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+ Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Sub(m.Float64Constant(*i), t0);
- m.StoreToPointer(&output, kMachineFloat64, t1);
+ m.StoreToPointer(&output, kMachFloat64, t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
@@ -2678,9 +2874,9 @@ TEST(RunFloat64SubImm2) {
FOR_FLOAT64_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
- Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+ Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Sub(t0, m.Float64Constant(*i));
- m.StoreToPointer(&output, kMachineFloat64, t1);
+ m.StoreToPointer(&output, kMachFloat64, t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
@@ -2715,10 +2911,10 @@ TEST(RunFloat64MulAndFloat64AddP) {
{
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
- Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
- m.StoreToPointer(&output, kMachineFloat64,
+ Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+ Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+ Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
+ m.StoreToPointer(&output, kMachFloat64,
m.Float64Add(m.Float64Mul(a, b), c));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
@@ -2737,10 +2933,10 @@ TEST(RunFloat64MulAndFloat64AddP) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
- Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
- m.StoreToPointer(&output, kMachineFloat64,
+ Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+ Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+ Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
+ m.StoreToPointer(&output, kMachFloat64,
m.Float64Add(a, m.Float64Mul(b, c)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
@@ -2767,11 +2963,10 @@ TEST(RunFloat64MulAndFloat64SubP) {
double output = 0.0;
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
- Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
- m.StoreToPointer(&output, kMachineFloat64,
- m.Float64Sub(a, m.Float64Mul(b, c)));
+ Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+ Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+ Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
+ m.StoreToPointer(&output, kMachFloat64, m.Float64Sub(a, m.Float64Mul(b, c)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
@@ -2797,9 +2992,9 @@ TEST(RunFloat64MulImm) {
{
FOR_FLOAT64_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
- Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+ Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Mul(m.Float64Constant(*i), t0);
- m.StoreToPointer(&output, kMachineFloat64, t1);
+ m.StoreToPointer(&output, kMachFloat64, t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
@@ -2812,9 +3007,9 @@ TEST(RunFloat64MulImm) {
{
FOR_FLOAT64_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
- Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+ Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Mul(t0, m.Float64Constant(*i));
- m.StoreToPointer(&output, kMachineFloat64, t1);
+ m.StoreToPointer(&output, kMachFloat64, t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
@@ -2864,7 +3059,7 @@ TEST(RunChangeInt32ToFloat64_A) {
double result = 0;
Node* convert = m.ChangeInt32ToFloat64(m.Int32Constant(magic));
- m.Store(kMachineFloat64, m.PointerConstant(&result), m.Int32Constant(0),
+ m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(0),
convert);
m.Return(m.Int32Constant(magic));
@@ -2874,11 +3069,11 @@ TEST(RunChangeInt32ToFloat64_A) {
TEST(RunChangeInt32ToFloat64_B) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
double output = 0;
Node* convert = m.ChangeInt32ToFloat64(m.Parameter(0));
- m.Store(kMachineFloat64, m.PointerConstant(&output), m.Int32Constant(0),
+ m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Return(m.Parameter(0));
@@ -2891,11 +3086,11 @@ TEST(RunChangeInt32ToFloat64_B) {
TEST(RunChangeUint32ToFloat64_B) {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
double output = 0;
Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
- m.Store(kMachineFloat64, m.PointerConstant(&output), m.Int32Constant(0),
+ m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Return(m.Parameter(0));
@@ -2913,7 +3108,7 @@ TEST(RunChangeFloat64ToInt32_A) {
double input = 11.1;
int32_t result = 0;
- m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(0),
+ m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(0),
m.ChangeFloat64ToInt32(m.Float64Constant(input)));
m.Return(m.Int32Constant(magic));
@@ -2928,10 +3123,9 @@ TEST(RunChangeFloat64ToInt32_B) {
int32_t output = 0;
Node* load =
- m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+ m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0));
Node* convert = m.ChangeFloat64ToInt32(load);
- m.Store(kMachineWord32, m.PointerConstant(&output), m.Int32Constant(0),
- convert);
+ m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert);
m.Return(convert);
{
@@ -2970,10 +3164,9 @@ TEST(RunChangeFloat64ToUint32_B) {
int32_t output = 0;
Node* load =
- m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+ m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0));
Node* convert = m.ChangeFloat64ToUint32(load);
- m.Store(kMachineWord32, m.PointerConstant(&output), m.Int32Constant(0),
- convert);
+ m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert);
m.Return(convert);
{
@@ -3016,12 +3209,12 @@ TEST(RunChangeFloat64ToInt32_spilled) {
Node* input_node[kNumInputs];
for (int i = 0; i < kNumInputs; i++) {
- input_node[i] = m.Load(kMachineFloat64, m.PointerConstant(&input),
- m.Int32Constant(i * 8));
+ input_node[i] =
+ m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(i * 8));
}
for (int i = 0; i < kNumInputs; i++) {
- m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(i * 4),
+ m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(i * 4),
m.ChangeFloat64ToInt32(input_node[i]));
}
@@ -3039,6 +3232,46 @@ TEST(RunChangeFloat64ToInt32_spilled) {
}
+TEST(RunChangeFloat64ToUint32_spilled) {
+ RawMachineAssemblerTester<uint32_t> m;
+ const int kNumInputs = 32;
+ int32_t magic = 0x786234;
+ double input[kNumInputs];
+ uint32_t result[kNumInputs];
+ Node* input_node[kNumInputs];
+
+ for (int i = 0; i < kNumInputs; i++) {
+ input_node[i] =
+ m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(i * 8));
+ }
+
+ for (int i = 0; i < kNumInputs; i++) {
+ m.Store(kMachUint32, m.PointerConstant(&result), m.Int32Constant(i * 4),
+ m.ChangeFloat64ToUint32(input_node[i]));
+ }
+
+ m.Return(m.Int32Constant(magic));
+
+ for (int i = 0; i < kNumInputs; i++) {
+ if (i % 2) {
+ input[i] = 100 + i + 2147483648u;
+ } else {
+ input[i] = 100 + i;
+ }
+ }
+
+ CHECK_EQ(magic, m.Call());
+
+ for (int i = 0; i < kNumInputs; i++) {
+ if (i % 2) {
+ CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i + 2147483648u));
+ } else {
+ CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i));
+ }
+ }
+}
+
+
TEST(RunDeadChangeFloat64ToInt32) {
RawMachineAssemblerTester<int32_t> m;
const int magic = 0x88abcda4;
@@ -3067,7 +3300,7 @@ TEST(RunLoopPhiInduction2) {
Node* false_node = m.Int32Constant(false_val);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(false_node, false_node);
+ Node* phi = m.Phi(kMachInt32, false_node, false_node);
m.Branch(m.Int32Constant(0), &body, &end);
m.Bind(&body);
Node* add = m.Int32Add(phi, m.Int32Constant(1));
@@ -3096,8 +3329,8 @@ TEST(RunDoubleDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(k2, k1);
- m.Store(kMachineFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+ Node* phi = m.Phi(kMachFloat64, k2, k1);
+ m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -3122,8 +3355,8 @@ TEST(RunRefDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(k2, k1);
- m.Store(kMachineTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+ Node* phi = m.Phi(kMachAnyTagged, k2, k1);
+ m.Store(kMachAnyTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -3152,11 +3385,10 @@ TEST(RunDoubleRefDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* dphi = m.Phi(d2, d1);
- Node* rphi = m.Phi(r2, r1);
- m.Store(kMachineFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0),
- dphi);
- m.Store(kMachineTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+ Node* dphi = m.Phi(kMachFloat64, d2, d1);
+ Node* rphi = m.Phi(kMachAnyTagged, r2, r1);
+ m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi);
+ m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
rphi);
m.Return(m.Int32Constant(magic));
@@ -3187,8 +3419,8 @@ TEST(RunDoubleRefDoubleDiamond) {
m.Bind(&blockb);
m.Goto(&mid);
m.Bind(&mid);
- Node* dphi1 = m.Phi(d2, d1);
- Node* rphi1 = m.Phi(r2, r1);
+ Node* dphi1 = m.Phi(kMachFloat64, d2, d1);
+ Node* rphi1 = m.Phi(kMachAnyTagged, r2, r1);
m.Branch(m.Int32Constant(0), &blockd, &blocke);
m.Bind(&blockd);
@@ -3196,12 +3428,11 @@ TEST(RunDoubleRefDoubleDiamond) {
m.Bind(&blocke);
m.Goto(&end);
m.Bind(&end);
- Node* dphi2 = m.Phi(d1, dphi1);
- Node* rphi2 = m.Phi(r1, rphi1);
+ Node* dphi2 = m.Phi(kMachFloat64, d1, dphi1);
+ Node* rphi2 = m.Phi(kMachAnyTagged, r1, rphi1);
- m.Store(kMachineFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0),
- dphi2);
- m.Store(kMachineTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+ m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi2);
+ m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
rphi2);
m.Return(m.Int32Constant(magic));
@@ -3224,13 +3455,13 @@ TEST(RunDoubleLoopPhi) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(dk, dk);
+ Node* phi = m.Phi(kMachFloat64, dk, dk);
phi->ReplaceInput(1, phi);
m.Branch(zero, &body, &end);
m.Bind(&body);
m.Goto(&header);
m.Bind(&end);
- m.Store(kMachineFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+ m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -3249,8 +3480,8 @@ TEST(RunCountToTenAccRaw) {
m.Goto(&header);
m.Bind(&header);
- Node* i = m.Phi(zero, zero);
- Node* j = m.Phi(zero, zero);
+ Node* i = m.Phi(kMachInt32, zero, zero);
+ Node* j = m.Phi(kMachInt32, zero, zero);
m.Goto(&body);
m.Bind(&body);
@@ -3282,9 +3513,9 @@ TEST(RunCountToTenAccRaw2) {
m.Goto(&header);
m.Bind(&header);
- Node* i = m.Phi(zero, zero);
- Node* j = m.Phi(zero, zero);
- Node* k = m.Phi(zero, zero);
+ Node* i = m.Phi(kMachInt32, zero, zero);
+ Node* j = m.Phi(kMachInt32, zero, zero);
+ Node* k = m.Phi(kMachInt32, zero, zero);
m.Goto(&body);
m.Bind(&body);
@@ -3311,14 +3542,14 @@ TEST(RunAddTree) {
int32_t inputs[] = {11, 12, 13, 14, 15, 16, 17, 18};
Node* base = m.PointerConstant(inputs);
- Node* n0 = m.Load(kMachineWord32, base, m.Int32Constant(0 * sizeof(int32_t)));
- Node* n1 = m.Load(kMachineWord32, base, m.Int32Constant(1 * sizeof(int32_t)));
- Node* n2 = m.Load(kMachineWord32, base, m.Int32Constant(2 * sizeof(int32_t)));
- Node* n3 = m.Load(kMachineWord32, base, m.Int32Constant(3 * sizeof(int32_t)));
- Node* n4 = m.Load(kMachineWord32, base, m.Int32Constant(4 * sizeof(int32_t)));
- Node* n5 = m.Load(kMachineWord32, base, m.Int32Constant(5 * sizeof(int32_t)));
- Node* n6 = m.Load(kMachineWord32, base, m.Int32Constant(6 * sizeof(int32_t)));
- Node* n7 = m.Load(kMachineWord32, base, m.Int32Constant(7 * sizeof(int32_t)));
+ Node* n0 = m.Load(kMachInt32, base, m.Int32Constant(0 * sizeof(int32_t)));
+ Node* n1 = m.Load(kMachInt32, base, m.Int32Constant(1 * sizeof(int32_t)));
+ Node* n2 = m.Load(kMachInt32, base, m.Int32Constant(2 * sizeof(int32_t)));
+ Node* n3 = m.Load(kMachInt32, base, m.Int32Constant(3 * sizeof(int32_t)));
+ Node* n4 = m.Load(kMachInt32, base, m.Int32Constant(4 * sizeof(int32_t)));
+ Node* n5 = m.Load(kMachInt32, base, m.Int32Constant(5 * sizeof(int32_t)));
+ Node* n6 = m.Load(kMachInt32, base, m.Int32Constant(6 * sizeof(int32_t)));
+ Node* n7 = m.Load(kMachInt32, base, m.Int32Constant(7 * sizeof(int32_t)));
Node* i1 = m.Int32Add(n0, n1);
Node* i2 = m.Int32Add(n2, n3);
@@ -3336,85 +3567,6 @@ TEST(RunAddTree) {
}
-#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-static int Seven() { return 7; }
-static int UnaryMinus(int a) { return -a; }
-static int APlusTwoB(int a, int b) { return a + 2 * b; }
-
-
-TEST(RunCallSeven) {
- for (int i = 0; i < 2; i++) {
- bool call_direct = i == 0;
- void* function_address =
- reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
-
- RawMachineAssemblerTester<int32_t> m;
- Node** args = NULL;
- MachineType* arg_types = NULL;
- Node* function =
- call_direct ? m.PointerConstant(function_address)
- : m.LoadFromPointer(&function_address,
- MachineOperatorBuilder::pointer_rep());
- m.Return(m.CallC(function, kMachineWord32, arg_types, args, 0));
-
- CHECK_EQ(7, m.Call());
- }
-}
-
-
-TEST(RunCallUnaryMinus) {
- for (int i = 0; i < 2; i++) {
- bool call_direct = i == 0;
- void* function_address =
- reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&UnaryMinus));
-
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* args[] = {m.Parameter(0)};
- MachineType arg_types[] = {kMachineWord32};
- Node* function =
- call_direct ? m.PointerConstant(function_address)
- : m.LoadFromPointer(&function_address,
- MachineOperatorBuilder::pointer_rep());
- m.Return(m.CallC(function, kMachineWord32, arg_types, args, 1));
-
- FOR_INT32_INPUTS(i) {
- int a = *i;
- CHECK_EQ(-a, m.Call(a));
- }
- }
-}
-
-
-TEST(RunCallAPlusTwoB) {
- for (int i = 0; i < 2; i++) {
- bool call_direct = i == 0;
- void* function_address =
- reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&APlusTwoB));
-
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
- Node* args[] = {m.Parameter(0), m.Parameter(1)};
- MachineType arg_types[] = {kMachineWord32, kMachineWord32};
- Node* function =
- call_direct ? m.PointerConstant(function_address)
- : m.LoadFromPointer(&function_address,
- MachineOperatorBuilder::pointer_rep());
- m.Return(m.CallC(function, kMachineWord32, arg_types, args, 2));
-
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int a = *i;
- int b = *j;
- int result = m.Call(a, b);
- CHECK_EQ(a + 2 * b, result);
- }
- }
- }
-}
-
-#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-
static const int kFloat64CompareHelperTestCases = 15;
static const int kFloat64CompareHelperNodeType = 4;
@@ -3429,9 +3581,9 @@ static int Float64CompareHelper(RawMachineAssemblerTester<int32_t>* m,
CHECK(x < y);
bool load_a = node_type / 2 == 1;
bool load_b = node_type % 2 == 1;
- Node* a = load_a ? m->Load(kMachineFloat64, m->PointerConstant(&buffer[0]))
+ Node* a = load_a ? m->Load(kMachFloat64, m->PointerConstant(&buffer[0]))
: m->Float64Constant(x);
- Node* b = load_b ? m->Load(kMachineFloat64, m->PointerConstant(&buffer[1]))
+ Node* b = load_b ? m->Load(kMachFloat64, m->PointerConstant(&buffer[1]))
: m->Float64Constant(y);
Node* cmp = NULL;
bool expected = false;
@@ -3519,7 +3671,7 @@ TEST(RunFloat64Compare) {
for (int test = 0; test < kFloat64CompareHelperTestCases; test++) {
for (int node_type = 0; node_type < kFloat64CompareHelperNodeType;
node_type++) {
- for (size_t input = 0; input < ARRAY_SIZE(inputs); input += 2) {
+ for (size_t input = 0; input < arraysize(inputs); input += 2) {
RawMachineAssemblerTester<int32_t> m;
int expected = Float64CompareHelper(&m, test, node_type, inputs[input],
inputs[input + 1]);
@@ -3533,14 +3685,14 @@ TEST(RunFloat64Compare) {
TEST(RunFloat64UnorderedCompare) {
RawMachineAssemblerTester<int32_t> m;
- Operator* operators[] = {m.machine()->Float64Equal(),
- m.machine()->Float64LessThan(),
- m.machine()->Float64LessThanOrEqual()};
+ const Operator* operators[] = {m.machine()->Float64Equal(),
+ m.machine()->Float64LessThan(),
+ m.machine()->Float64LessThanOrEqual()};
double nan = v8::base::OS::nan_value();
FOR_FLOAT64_INPUTS(i) {
- for (size_t o = 0; o < ARRAY_SIZE(operators); ++o) {
+ for (size_t o = 0; o < arraysize(operators); ++o) {
for (int j = 0; j < 2; j++) {
RawMachineAssemblerTester<int32_t> m;
Node* a = m.Float64Constant(*i);
@@ -3559,8 +3711,8 @@ TEST(RunFloat64Equal) {
double input_b = 0.0;
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+ Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+ Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
m.Return(m.Float64Equal(a, b));
CompareWrapper cmp(IrOpcode::kFloat64Equal);
@@ -3580,8 +3732,8 @@ TEST(RunFloat64LessThan) {
double input_b = 0.0;
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+ Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+ Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
m.Return(m.Float64LessThan(a, b));
CompareWrapper cmp(IrOpcode::kFloat64LessThan);
@@ -3616,29 +3768,28 @@ static void LoadStoreTruncation() {
// Test lower bound.
input = min;
- CHECK_EQ(max + 2, m.Call());
+ CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
CHECK_EQ(min + 1, input);
// Test all one byte values that are not one byte bounds.
for (int i = -127; i < 127; i++) {
input = i;
int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
- CHECK_EQ(expected, m.Call());
- CHECK_EQ(i + 1, input);
+ CHECK_EQ(static_cast<IntType>(expected), m.Call());
+ CHECK_EQ(static_cast<IntType>(i + 1), input);
}
}
TEST(RunLoadStoreTruncation) {
- LoadStoreTruncation<int8_t, kMachineWord8>();
- LoadStoreTruncation<int16_t, kMachineWord16>();
+ LoadStoreTruncation<int8_t, kMachInt8>();
+ LoadStoreTruncation<int16_t, kMachInt16>();
}
static void IntPtrCompare(intptr_t left, intptr_t right) {
for (int test = 0; test < 7; test++) {
- RawMachineAssemblerTester<bool> m(MachineOperatorBuilder::pointer_rep(),
- MachineOperatorBuilder::pointer_rep());
+ RawMachineAssemblerTester<bool> m(kMachPtr, kMachPtr);
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
Node* res = NULL;
@@ -3688,7 +3839,7 @@ TEST(RunIntPtrCompare) {
intptr_t max = std::numeric_limits<intptr_t>::max();
// An ascending chain of intptr_t
intptr_t inputs[] = {min, min / 2, -1, 0, 1, max / 2, max};
- for (size_t i = 0; i < ARRAY_SIZE(inputs) - 1; i++) {
+ for (size_t i = 0; i < arraysize(inputs) - 1; i++) {
IntPtrCompare(inputs[i], inputs[i + 1]);
}
}
@@ -3707,7 +3858,7 @@ TEST(RunTestIntPtrArithmetic) {
Node* output = m.PointerConstant(&outputs[kInputSize - 1]);
Node* elem_size = m.ConvertInt32ToIntPtr(m.Int32Constant(sizeof(inputs[0])));
for (int i = 0; i < kInputSize; i++) {
- m.Store(kMachineWord32, output, m.Load(kMachineWord32, input));
+ m.Store(kMachInt32, output, m.Load(kMachInt32, input));
input = m.IntPtrAdd(input, elem_size);
output = m.IntPtrSub(output, elem_size);
}
@@ -3720,53 +3871,6 @@ TEST(RunTestIntPtrArithmetic) {
}
-static inline uint32_t rotr32(uint32_t i, uint32_t j) {
- return (i >> j) | (i << (32 - j));
-}
-
-
-TEST(RunTestInt32RotateRightP) {
- {
- RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
- bt.AddReturn(m.Word32Or(
- m.Word32Shr(bt.param0, bt.param1),
- m.Word32Shl(bt.param0, m.Int32Sub(m.Int32Constant(32), bt.param1))));
- bt.Run(ValueHelper::uint32_vector(), ValueHelper::ror_vector(), rotr32);
- }
- {
- RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
- bt.AddReturn(m.Word32Or(
- m.Word32Shl(bt.param0, m.Int32Sub(m.Int32Constant(32), bt.param1)),
- m.Word32Shr(bt.param0, bt.param1)));
- bt.Run(ValueHelper::uint32_vector(), ValueHelper::ror_vector(), rotr32);
- }
-}
-
-
-TEST(RunTestInt32RotateRightImm) {
- FOR_INPUTS(uint32_t, ror, i) {
- {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* value = m.Parameter(0);
- m.Return(m.Word32Or(m.Word32Shr(value, m.Int32Constant(*i)),
- m.Word32Shl(value, m.Int32Constant(32 - *i))));
- m.Run(ValueHelper::uint32_vector(),
- std::bind2nd(std::ptr_fun(&rotr32), *i));
- }
- {
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* value = m.Parameter(0);
- m.Return(m.Word32Or(m.Word32Shl(value, m.Int32Constant(32 - *i)),
- m.Word32Shr(value, m.Int32Constant(*i))));
- m.Run(ValueHelper::uint32_vector(),
- std::bind2nd(std::ptr_fun(&rotr32), *i));
- }
- }
-}
-
-
TEST(RunSpillLotsOfThings) {
static const int kInputSize = 1000;
RawMachineAssemblerTester<void> m;
@@ -3779,7 +3883,7 @@ TEST(RunSpillLotsOfThings) {
accs[i] = acc;
}
for (int i = 0; i < kInputSize; i++) {
- m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+ m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
}
m.Return(one);
m.Call();
@@ -3792,7 +3896,7 @@ TEST(RunSpillLotsOfThings) {
TEST(RunSpillConstantsAndParameters) {
static const int kInputSize = 1000;
static const int32_t kBase = 987;
- RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
int32_t outputs[kInputSize];
Node* csts[kInputSize];
Node* accs[kInputSize];
@@ -3805,7 +3909,7 @@ TEST(RunSpillConstantsAndParameters) {
accs[i] = acc;
}
for (int i = 0; i < kInputSize; i++) {
- m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+ m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
}
m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(i) {
@@ -3826,7 +3930,7 @@ TEST(RunSpillConstantsAndParameters) {
TEST(RunNewSpaceConstantsInPhi) {
- RawMachineAssemblerTester<Object*> m(kMachineWord32);
+ RawMachineAssemblerTester<Object*> m(kMachInt32);
Isolate* isolate = CcTest::i_isolate();
Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2);
@@ -3842,7 +3946,7 @@ TEST(RunNewSpaceConstantsInPhi) {
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(true_node, false_node);
+ Node* phi = m.Phi(kMachAnyTagged, true_node, false_node);
m.Return(phi);
CHECK_EQ(*false_val, m.Call(0));
@@ -3850,55 +3954,6 @@ TEST(RunNewSpaceConstantsInPhi) {
}
-#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-TEST(RunSpillLotsOfThingsWithCall) {
- static const int kInputSize = 1000;
- RawMachineAssemblerTester<void> m;
- Node* accs[kInputSize];
- int32_t outputs[kInputSize];
- Node* one = m.Int32Constant(1);
- Node* acc = one;
- for (int i = 0; i < kInputSize; i++) {
- acc = m.Int32Add(acc, one);
- accs[i] = acc;
- }
- // If the spill slot computation is wrong, it might load from the c frame
- {
- void* func = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
- Node** args = NULL;
- MachineType* arg_types = NULL;
- m.CallC(m.PointerConstant(func), kMachineWord32, arg_types, args, 0);
- }
- for (int i = 0; i < kInputSize; i++) {
- m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
- }
- m.Return(one);
- m.Call();
- for (int i = 0; i < kInputSize; i++) {
- CHECK_EQ(outputs[i], i + 2);
- }
-}
-
-#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-
-static bool sadd_overflow(int32_t x, int32_t y, int32_t* val) {
- int32_t v =
- static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
- *val = v;
- return (((v ^ x) & (v ^ y)) >> 31) & 1;
-}
-
-
-static bool ssub_overflow(int32_t x, int32_t y, int32_t* val) {
- int32_t v =
- static_cast<int32_t>(static_cast<uint32_t>(x) - static_cast<uint32_t>(y));
- *val = v;
- return (((v ^ x) & (v ^ ~y)) >> 31) & 1;
-}
-
-
TEST(RunInt32AddWithOverflowP) {
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
@@ -3906,12 +3961,12 @@ TEST(RunInt32AddWithOverflowP) {
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected_val;
- int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, bt.call(*i, *j));
CHECK_EQ(expected_val, actual_val);
}
@@ -3923,27 +3978,27 @@ TEST(RunInt32AddWithOverflowImm) {
int32_t actual_val = -1, expected_val = 0;
FOR_INT32_INPUTS(i) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call(*j));
CHECK_EQ(expected_val, actual_val);
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call(*j));
CHECK_EQ(expected_val, actual_val);
}
@@ -3954,9 +4009,9 @@ TEST(RunInt32AddWithOverflowImm) {
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
m.Return(ovf);
- int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
@@ -3977,10 +4032,10 @@ TEST(RunInt32AddWithOverflowInBranchP) {
m.Bind(&blockb);
Node* val = m.Projection(0, add);
bt.AddReturn(val);
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
int32_t expected;
- if (sadd_overflow(*i, *j, &expected)) expected = constant;
+ if (bits::SignedAddOverflow32(*i, *j, &expected)) expected = constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -3994,12 +4049,12 @@ TEST(RunInt32SubWithOverflowP) {
Node* add = m.Int32SubWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected_val;
- int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, bt.call(*i, *j));
CHECK_EQ(expected_val, actual_val);
}
@@ -4011,27 +4066,27 @@ TEST(RunInt32SubWithOverflowImm) {
int32_t actual_val = -1, expected_val = 0;
FOR_INT32_INPUTS(i) {
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call(*j));
CHECK_EQ(expected_val, actual_val);
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf = ssub_overflow(*j, *i, &expected_val);
+ int expected_ovf = bits::SignedSubOverflow32(*j, *i, &expected_val);
CHECK_EQ(expected_ovf, m.Call(*j));
CHECK_EQ(expected_val, actual_val);
}
@@ -4042,9 +4097,9 @@ TEST(RunInt32SubWithOverflowImm) {
m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.StoreToPointer(&actual_val, kMachInt32, val);
m.Return(ovf);
- int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+ int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
@@ -4065,13 +4120,172 @@ TEST(RunInt32SubWithOverflowInBranchP) {
m.Bind(&blockb);
Node* val = m.Projection(0, sub);
bt.AddReturn(val);
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
int32_t expected;
- if (ssub_overflow(*i, *j, &expected)) expected = constant;
+ if (bits::SignedSubOverflow32(*i, *j, &expected)) expected = constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
+
+TEST(RunChangeInt32ToInt64P) {
+ if (kPointerSize < 8) return;
+ int64_t actual = -1;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ m.StoreToPointer(&actual, kMachInt64, m.ChangeInt32ToInt64(m.Parameter(0)));
+ m.Return(m.Int32Constant(0));
+ FOR_INT32_INPUTS(i) {
+ int64_t expected = *i;
+ CHECK_EQ(0, m.Call(*i));
+ CHECK_EQ(expected, actual);
+ }
+}
+
+
+TEST(RunChangeUint32ToUint64P) {
+ if (kPointerSize < 8) return;
+ int64_t actual = -1;
+ RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ m.StoreToPointer(&actual, kMachUint64,
+ m.ChangeUint32ToUint64(m.Parameter(0)));
+ m.Return(m.Int32Constant(0));
+ FOR_UINT32_INPUTS(i) {
+ int64_t expected = static_cast<uint64_t>(*i);
+ CHECK_EQ(0, m.Call(*i));
+ CHECK_EQ(expected, actual);
+ }
+}
+
+
+TEST(RunTruncateInt64ToInt32P) {
+ if (kPointerSize < 8) return;
+ int64_t expected = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.TruncateInt64ToInt32(m.LoadFromPointer(&expected, kMachInt64)));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ expected = (static_cast<uint64_t>(*j) << 32) | *i;
+ CHECK_UINT32_EQ(expected, m.Call());
+ }
+ }
+}
+
+
+TEST(RunTruncateFloat64ToInt32P) {
+ struct {
+ double from;
+ double raw;
+ } kValues[] = {{0, 0},
+ {0.5, 0},
+ {-0.5, 0},
+ {1.5, 1},
+ {-1.5, -1},
+ {5.5, 5},
+ {-5.0, -5},
+ {v8::base::OS::nan_value(), 0},
+ {std::numeric_limits<double>::infinity(), 0},
+ {-v8::base::OS::nan_value(), 0},
+ {-std::numeric_limits<double>::infinity(), 0},
+ {4.94065645841e-324, 0},
+ {-4.94065645841e-324, 0},
+ {0.9999999999999999, 0},
+ {-0.9999999999999999, 0},
+ {4294967296.0, 0},
+ {-4294967296.0, 0},
+ {9223372036854775000.0, 4294966272.0},
+ {-9223372036854775000.0, -4294966272.0},
+ {4.5036e+15, 372629504},
+ {-4.5036e+15, -372629504},
+ {287524199.5377777, 0x11234567},
+ {-287524199.5377777, -0x11234567},
+ {2300193596.302222, 2300193596.0},
+ {-2300193596.302222, -2300193596.0},
+ {4600387192.604444, 305419896},
+ {-4600387192.604444, -305419896},
+ {4823855600872397.0, 1737075661},
+ {-4823855600872397.0, -1737075661},
+ {4503603922337791.0, -1},
+ {-4503603922337791.0, 1},
+ {4503601774854143.0, 2147483647},
+ {-4503601774854143.0, -2147483647},
+ {9007207844675582.0, -2},
+ {-9007207844675582.0, 2},
+ {2.4178527921507624e+24, -536870912},
+ {-2.4178527921507624e+24, 536870912},
+ {2.417853945072267e+24, -536870912},
+ {-2.417853945072267e+24, 536870912},
+ {4.8357055843015248e+24, -1073741824},
+ {-4.8357055843015248e+24, 1073741824},
+ {4.8357078901445341e+24, -1073741824},
+ {-4.8357078901445341e+24, 1073741824},
+ {2147483647.0, 2147483647.0},
+ {-2147483648.0, -2147483648.0},
+ {9.6714111686030497e+24, -2147483648.0},
+ {-9.6714111686030497e+24, -2147483648.0},
+ {9.6714157802890681e+24, -2147483648.0},
+ {-9.6714157802890681e+24, -2147483648.0},
+ {1.9342813113834065e+25, 2147483648.0},
+ {-1.9342813113834065e+25, 2147483648.0},
+ {3.868562622766813e+25, 0},
+ {-3.868562622766813e+25, 0},
+ {1.7976931348623157e+308, 0},
+ {-1.7976931348623157e+308, 0}};
+ double input = -1.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.TruncateFloat64ToInt32(m.LoadFromPointer(&input, kMachFloat64)));
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ input = kValues[i].from;
+ uint64_t expected = static_cast<int64_t>(kValues[i].raw);
+ CHECK_EQ(static_cast<int>(expected), m.Call());
+ }
+}
+
+
+TEST(RunChangeFloat32ToFloat64) {
+ double actual = 0.0f;
+ float expected = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &actual, kMachFloat64,
+ m.ChangeFloat32ToFloat64(m.LoadFromPointer(&expected, kMachFloat32)));
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT32_INPUTS(i) {
+ expected = *i;
+ CHECK_EQ(0, m.Call());
+ CHECK_EQ(expected, actual);
+ }
+}
+
+
+TEST(RunTruncateFloat64ToFloat32) {
+ float actual = 0.0f;
+ double input = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &actual, kMachFloat32,
+ m.TruncateFloat64ToFloat32(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ volatile double expected = DoubleToFloat32(input);
+ CHECK_EQ(0, m.Call());
+ CHECK_EQ(expected, actual);
+ }
+}
+
+
+TEST(RunFloat32Constant) {
+ FOR_FLOAT32_INPUTS(i) {
+ float expected = *i;
+ float actual = *i;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(&actual, kMachFloat32, m.Float32Constant(expected));
+ m.Return(m.Int32Constant(0));
+ CHECK_EQ(0, m.Call());
+ CHECK_EQ(expected, actual);
+ }
+}
+
#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-properties.cc b/deps/v8/test/cctest/compiler/test-run-properties.cc
new file mode 100644
index 0000000000..d4442f7a85
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-properties.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename U>
+static void TypedArrayLoadHelper(const char* array_type) {
+ static const uint32_t kValues[] = {
+ 0x00000000, 0x00000001, 0x00000023, 0x00000042, 0x12345678, 0x87654321,
+ 0x0000003f, 0x0000007f, 0x00003fff, 0x00007fff, 0x3fffffff, 0x7fffffff,
+ 0x000000ff, 0x00000080, 0x0000ffff, 0x00008000, 0xffffffff, 0x80000000};
+ EmbeddedVector<char, 1024> values_buffer;
+ StringBuilder values_builder(values_buffer.start(), values_buffer.length());
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]);
+ }
+
+ // Note that below source creates two different typed arrays with distinct
+ // elements kind to get coverage for both access patterns:
+ // - IsFixedTypedArrayElementsKind(x)
+ // - IsExternalArrayElementsKind(y)
+ const char* source =
+ "(function(a) {"
+ " var x = (a = new %sArray(%d)); %s;"
+ " var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);"
+ " if (!%%HasFixed%sElements(x)) %%AbortJS('x');"
+ " if (!%%HasExternal%sElements(y)) %%AbortJS('y');"
+ " function f(a,b) {"
+ " a = a | 0; b = b | 0;"
+ " return x[a] + y[b];"
+ " }"
+ " return f;"
+ "})()";
+ EmbeddedVector<char, 1024> source_buffer;
+ SNPrintF(source_buffer, source, array_type, arraysize(kValues),
+ values_buffer.start(), array_type, arraysize(kValues),
+ values_buffer.start(), array_type, array_type);
+
+ FunctionTester T(
+ source_buffer.start(),
+ CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled);
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ for (size_t j = 0; j < arraysize(kValues); ++j) {
+ volatile U value_a = static_cast<U>(kValues[i]);
+ volatile U value_b = static_cast<U>(kValues[j]);
+ double expected =
+ static_cast<double>(value_a) + static_cast<double>(value_b);
+ T.CheckCall(T.Val(expected), T.Val(static_cast<double>(i)),
+ T.Val(static_cast<double>(j)));
+ }
+ }
+}
+
+
+TEST(TypedArrayLoad) {
+ FLAG_typed_array_max_size_in_heap = 256;
+ TypedArrayLoadHelper<int8_t>("Int8");
+ TypedArrayLoadHelper<uint8_t>("Uint8");
+ TypedArrayLoadHelper<int16_t>("Int16");
+ TypedArrayLoadHelper<uint16_t>("Uint16");
+ TypedArrayLoadHelper<int32_t>("Int32");
+ TypedArrayLoadHelper<uint32_t>("Uint32");
+ TypedArrayLoadHelper<float>("Float32");
+ TypedArrayLoadHelper<double>("Float64");
+ // TODO(mstarzinger): Add tests for ClampedUint8.
+}
+
+
+template <typename U>
+static void TypedArrayStoreHelper(const char* array_type) {
+ static const uint32_t kValues[] = {
+ 0x00000000, 0x00000001, 0x00000023, 0x00000042, 0x12345678, 0x87654321,
+ 0x0000003f, 0x0000007f, 0x00003fff, 0x00007fff, 0x3fffffff, 0x7fffffff,
+ 0x000000ff, 0x00000080, 0x0000ffff, 0x00008000, 0xffffffff, 0x80000000};
+ EmbeddedVector<char, 1024> values_buffer;
+ StringBuilder values_builder(values_buffer.start(), values_buffer.length());
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]);
+ }
+
+ // Note that below source creates two different typed arrays with distinct
+ // elements kind to get coverage for both access patterns:
+ // - IsFixedTypedArrayElementsKind(x)
+ // - IsExternalArrayElementsKind(y)
+ const char* source =
+ "(function(a) {"
+ " var x = (a = new %sArray(%d)); %s;"
+ " var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);"
+ " if (!%%HasFixed%sElements(x)) %%AbortJS('x');"
+ " if (!%%HasExternal%sElements(y)) %%AbortJS('y');"
+ " function f(a,b) {"
+ " a = a | 0; b = b | 0;"
+ " var t = x[a];"
+ " x[a] = y[b];"
+ " y[b] = t;"
+ " t = y[b];"
+ " y[b] = x[a];"
+ " x[a] = t;"
+ " return x[a] + y[b];"
+ " }"
+ " return f;"
+ "})()";
+ EmbeddedVector<char, 2048> source_buffer;
+ SNPrintF(source_buffer, source, array_type, arraysize(kValues),
+ values_buffer.start(), array_type, arraysize(kValues),
+ values_buffer.start(), array_type, array_type);
+
+ FunctionTester T(
+ source_buffer.start(),
+ CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled);
+ for (size_t i = 0; i < arraysize(kValues); ++i) {
+ for (size_t j = 0; j < arraysize(kValues); ++j) {
+ volatile U value_a = static_cast<U>(kValues[i]);
+ volatile U value_b = static_cast<U>(kValues[j]);
+ double expected =
+ static_cast<double>(value_a) + static_cast<double>(value_b);
+ T.CheckCall(T.Val(expected), T.Val(static_cast<double>(i)),
+ T.Val(static_cast<double>(j)));
+ }
+ }
+}
+
+
+TEST(TypedArrayStore) {
+ FLAG_typed_array_max_size_in_heap = 256;
+ TypedArrayStoreHelper<int8_t>("Int8");
+ TypedArrayStoreHelper<uint8_t>("Uint8");
+ TypedArrayStoreHelper<int16_t>("Int16");
+ TypedArrayStoreHelper<uint16_t>("Uint16");
+ TypedArrayStoreHelper<int32_t>("Int32");
+ TypedArrayStoreHelper<uint32_t>("Uint32");
+ TypedArrayStoreHelper<float>("Float32");
+ TypedArrayStoreHelper<double>("Float64");
+ // TODO(mstarzinger): Add tests for ClampedUint8.
+}
diff --git a/deps/v8/test/cctest/compiler/test-schedule.cc b/deps/v8/test/cctest/compiler/test-schedule.cc
index bfa47d872a..6c05c05916 100644
--- a/deps/v8/test/cctest/compiler/test-schedule.cc
+++ b/deps/v8/test/cctest/compiler/test-schedule.cc
@@ -23,8 +23,8 @@ TEST(TestScheduleAllocation) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- CHECK_NE(NULL, schedule.entry());
- CHECK_EQ(schedule.entry(), *(schedule.all_blocks().begin()));
+ CHECK_NE(NULL, schedule.start());
+ CHECK_EQ(schedule.start(), *(schedule.all_blocks().begin()));
}
@@ -36,7 +36,7 @@ TEST(TestScheduleAddNode) {
Schedule schedule(scope.main_zone());
- BasicBlock* entry = schedule.entry();
+ BasicBlock* entry = schedule.start();
schedule.AddNode(entry, n0);
schedule.AddNode(entry, n1);
@@ -53,7 +53,7 @@ TEST(TestScheduleAddGoto) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* entry = schedule.entry();
+ BasicBlock* entry = schedule.start();
BasicBlock* next = schedule.NewBasicBlock();
schedule.AddGoto(entry, next);
@@ -72,7 +72,7 @@ TEST(TestScheduleAddBranch) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* entry = schedule.entry();
+ BasicBlock* entry = schedule.start();
BasicBlock* tblock = schedule.NewBasicBlock();
BasicBlock* fblock = schedule.NewBasicBlock();
@@ -103,12 +103,12 @@ TEST(TestScheduleAddReturn) {
Schedule schedule(scope.main_zone());
Graph graph(scope.main_zone());
Node* n0 = graph.NewNode(&dummy_operator);
- BasicBlock* entry = schedule.entry();
+ BasicBlock* entry = schedule.start();
schedule.AddReturn(entry, n0);
CHECK_EQ(0, entry->PredecessorCount());
CHECK_EQ(1, entry->SuccessorCount());
- CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+ CHECK_EQ(schedule.end(), entry->SuccessorAt(0));
}
@@ -117,26 +117,12 @@ TEST(TestScheduleAddThrow) {
Schedule schedule(scope.main_zone());
Graph graph(scope.main_zone());
Node* n0 = graph.NewNode(&dummy_operator);
- BasicBlock* entry = schedule.entry();
+ BasicBlock* entry = schedule.start();
schedule.AddThrow(entry, n0);
CHECK_EQ(0, entry->PredecessorCount());
CHECK_EQ(1, entry->SuccessorCount());
- CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
-}
-
-
-TEST(TestScheduleAddDeopt) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- Node* n0 = graph.NewNode(&dummy_operator);
- BasicBlock* entry = schedule.entry();
- schedule.AddDeoptimize(entry, n0);
-
- CHECK_EQ(0, entry->PredecessorCount());
- CHECK_EQ(1, entry->SuccessorCount());
- CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+ CHECK_EQ(schedule.end(), entry->SuccessorAt(0));
}
@@ -145,7 +131,7 @@ TEST(BuildMulNodeGraph) {
Schedule schedule(scope.main_zone());
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
- MachineOperatorBuilder machine(scope.main_zone(), kMachineWord32);
+ MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
diff --git a/deps/v8/test/cctest/compiler/test-scheduler.cc b/deps/v8/test/cctest/compiler/test-scheduler.cc
index ec4e77e111..cf3312351d 100644
--- a/deps/v8/test/cctest/compiler/test-scheduler.cc
+++ b/deps/v8/test/cctest/compiler/test-scheduler.cc
@@ -16,10 +16,12 @@
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
+#include "src/compiler/verifier.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
+// TODO(titzer): pull RPO tests out to their own file.
struct TestLoop {
int count;
BasicBlock** nodes;
@@ -65,13 +67,49 @@ static void CheckLoopContains(BasicBlock** blocks, int body_size) {
}
+static int GetScheduledNodeCount(Schedule* schedule) {
+ int node_count = 0;
+ for (BasicBlockVectorIter i = schedule->rpo_order()->begin();
+ i != schedule->rpo_order()->end(); ++i) {
+ BasicBlock* block = *i;
+ for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+ ++j) {
+ ++node_count;
+ }
+ BasicBlock::Control control = block->control_;
+ if (control != BasicBlock::kNone) {
+ ++node_count;
+ }
+ }
+ return node_count;
+}
+
+
+static Schedule* ComputeAndVerifySchedule(int expected, Graph* graph) {
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ os << AsDOT(*graph);
+ }
+
+ Schedule* schedule = Scheduler::ComputeSchedule(graph);
+
+ if (FLAG_trace_turbo_scheduler) {
+ OFStream os(stdout);
+ os << *schedule << endl;
+ }
+ ScheduleVerifier::Run(schedule);
+ CHECK_EQ(expected, GetScheduledNodeCount(schedule));
+ return schedule;
+}
+
+
TEST(RPODegenerate1) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
CheckRPONumbers(order, 1, false);
- CHECK_EQ(schedule.entry(), order->at(0));
+ CHECK_EQ(schedule.start(), order->at(0));
}
@@ -79,11 +117,11 @@ TEST(RPODegenerate2) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- schedule.AddGoto(schedule.entry(), schedule.exit());
+ schedule.AddGoto(schedule.start(), schedule.end());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
CheckRPONumbers(order, 2, false);
- CHECK_EQ(schedule.entry(), order->at(0));
- CHECK_EQ(schedule.exit(), order->at(1));
+ CHECK_EQ(schedule.start(), order->at(0));
+ CHECK_EQ(schedule.end(), order->at(1));
}
@@ -93,7 +131,7 @@ TEST(RPOLine) {
for (int i = 0; i < 10; i++) {
Schedule schedule(scope.main_zone());
- BasicBlock* last = schedule.entry();
+ BasicBlock* last = schedule.start();
for (int j = 0; j < i; j++) {
BasicBlock* block = schedule.NewBasicBlock();
schedule.AddGoto(last, block);
@@ -117,10 +155,10 @@ TEST(RPOLine) {
TEST(RPOSelfLoop) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- schedule.AddSuccessor(schedule.entry(), schedule.entry());
+ schedule.AddSuccessor(schedule.start(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
CheckRPONumbers(order, 1, true);
- BasicBlock* loop[] = {schedule.entry()};
+ BasicBlock* loop[] = {schedule.start()};
CheckLoopContains(loop, 1);
}
@@ -128,11 +166,11 @@ TEST(RPOSelfLoop) {
TEST(RPOEntryLoop) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- schedule.AddSuccessor(schedule.entry(), schedule.exit());
- schedule.AddSuccessor(schedule.exit(), schedule.entry());
+ schedule.AddSuccessor(schedule.start(), schedule.end());
+ schedule.AddSuccessor(schedule.end(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
CheckRPONumbers(order, 2, true);
- BasicBlock* loop[] = {schedule.entry(), schedule.exit()};
+ BasicBlock* loop[] = {schedule.start(), schedule.end()};
CheckLoopContains(loop, 2);
}
@@ -141,7 +179,7 @@ TEST(RPOEndLoop) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessor(schedule.entry(), loop1->header());
+ schedule.AddSuccessor(schedule.start(), loop1->header());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
CheckRPONumbers(order, 3, true);
CheckLoopContains(loop1->nodes, loop1->count);
@@ -152,8 +190,8 @@ TEST(RPOEndLoopNested) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessor(schedule.entry(), loop1->header());
- schedule.AddSuccessor(loop1->last(), schedule.entry());
+ schedule.AddSuccessor(schedule.start(), loop1->header());
+ schedule.AddSuccessor(loop1->last(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
CheckRPONumbers(order, 3, true);
CheckLoopContains(loop1->nodes, loop1->count);
@@ -164,10 +202,10 @@ TEST(RPODiamond) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.exit();
+ BasicBlock* D = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(A, C);
@@ -188,10 +226,10 @@ TEST(RPOLoop1) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.exit();
+ BasicBlock* D = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(B, C);
@@ -209,10 +247,10 @@ TEST(RPOLoop2) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.exit();
+ BasicBlock* D = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(B, C);
@@ -231,13 +269,13 @@ TEST(RPOLoopN) {
for (int i = 0; i < 11; i++) {
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* D = schedule.NewBasicBlock();
BasicBlock* E = schedule.NewBasicBlock();
BasicBlock* F = schedule.NewBasicBlock();
- BasicBlock* G = schedule.exit();
+ BasicBlock* G = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(B, C);
@@ -273,12 +311,12 @@ TEST(RPOLoopNest1) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* D = schedule.NewBasicBlock();
BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.exit();
+ BasicBlock* F = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(B, C);
@@ -302,14 +340,14 @@ TEST(RPOLoopNest2) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* D = schedule.NewBasicBlock();
BasicBlock* E = schedule.NewBasicBlock();
BasicBlock* F = schedule.NewBasicBlock();
BasicBlock* G = schedule.NewBasicBlock();
- BasicBlock* H = schedule.exit();
+ BasicBlock* H = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(B, C);
@@ -343,8 +381,8 @@ TEST(RPOLoopFollow1) {
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
- BasicBlock* A = schedule.entry();
- BasicBlock* E = schedule.exit();
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
schedule.AddSuccessor(A, loop1->header());
schedule.AddSuccessor(loop1->header(), loop2->header());
@@ -367,9 +405,9 @@ TEST(RPOLoopFollow2) {
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* S = schedule.NewBasicBlock();
- BasicBlock* E = schedule.exit();
+ BasicBlock* E = schedule.end();
schedule.AddSuccessor(A, loop1->header());
schedule.AddSuccessor(loop1->header(), S);
@@ -394,8 +432,8 @@ TEST(RPOLoopFollowN) {
Schedule schedule(scope.main_zone());
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
- BasicBlock* A = schedule.entry();
- BasicBlock* E = schedule.exit();
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
schedule.AddSuccessor(A, loop1->header());
schedule.AddSuccessor(loop1->nodes[exit], loop2->header());
@@ -418,10 +456,10 @@ TEST(RPONestedLoopFollow1) {
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* E = schedule.exit();
+ BasicBlock* E = schedule.end();
schedule.AddSuccessor(A, B);
schedule.AddSuccessor(B, loop1->header());
@@ -450,8 +488,8 @@ TEST(RPOLoopBackedges1) {
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
- BasicBlock* E = schedule.exit();
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessor(A, loop1->header());
@@ -475,9 +513,9 @@ TEST(RPOLoopOutedges1) {
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.exit();
+ BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessor(A, loop1->header());
@@ -501,8 +539,8 @@ TEST(RPOLoopOutedges2) {
int size = 8;
for (int i = 0; i < size; i++) {
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
- BasicBlock* E = schedule.exit();
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessor(A, loop1->header());
@@ -527,8 +565,8 @@ TEST(RPOLoopOutloops1) {
int size = 8;
for (int i = 0; i < size; i++) {
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
- BasicBlock* E = schedule.exit();
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessor(A, loop1->header());
schedule.AddSuccessor(loop1->last(), E);
@@ -557,10 +595,10 @@ TEST(RPOLoopMultibackedge) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.entry();
+ BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.exit();
+ BasicBlock* D = schedule.end();
BasicBlock* E = schedule.NewBasicBlock();
schedule.AddSuccessor(A, B);
@@ -605,36 +643,6 @@ TEST(BuildScheduleOneParameter) {
}
-static int GetScheduledNodeCount(Schedule* schedule) {
- int node_count = 0;
- for (BasicBlockVectorIter i = schedule->rpo_order()->begin();
- i != schedule->rpo_order()->end(); ++i) {
- BasicBlock* block = *i;
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- ++node_count;
- }
- BasicBlock::Control control = block->control_;
- if (control != BasicBlock::kNone) {
- ++node_count;
- }
- }
- return node_count;
-}
-
-
-static void PrintGraph(Graph* graph) {
- OFStream os(stdout);
- os << AsDOT(*graph);
-}
-
-
-static void PrintSchedule(Schedule* schedule) {
- OFStream os(stdout);
- os << *schedule << endl;
-}
-
-
TEST(BuildScheduleIfSplit) {
HandleAndZoneScope scope;
Graph graph(scope.main_zone());
@@ -658,14 +666,7 @@ TEST(BuildScheduleIfSplit) {
Node* merge = graph.NewNode(builder.Merge(2), ret1, ret2);
graph.SetEnd(graph.NewNode(builder.End(), merge));
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
-
- CHECK_EQ(13, GetScheduledNodeCount(schedule));
+ ComputeAndVerifySchedule(13, &graph);
}
@@ -675,12 +676,11 @@ TEST(BuildScheduleIfSplitWithEffects) {
Graph graph(scope.main_zone());
CommonOperatorBuilder common_builder(scope.main_zone());
JSOperatorBuilder js_builder(scope.main_zone());
- Operator* op;
+ const Operator* op;
Handle<Object> object =
Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> unique_constant =
- PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+ Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
// Manually transcripted code for:
// function turbo_fan_test(a, b, c, y) {
@@ -811,13 +811,7 @@ TEST(BuildScheduleIfSplitWithEffects) {
graph.SetStart(n0);
graph.SetEnd(n23);
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
- CHECK_EQ(20, GetScheduledNodeCount(schedule));
+ ComputeAndVerifySchedule(20, &graph);
}
@@ -827,12 +821,11 @@ TEST(BuildScheduleSimpleLoop) {
Graph graph(scope.main_zone());
CommonOperatorBuilder common_builder(scope.main_zone());
JSOperatorBuilder js_builder(scope.main_zone());
- Operator* op;
+ const Operator* op;
Handle<Object> object =
Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> unique_constant =
- PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+ Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
// Manually transcripted code for:
// function turbo_fan_test(a, b) {
@@ -851,7 +844,7 @@ TEST(BuildScheduleSimpleLoop) {
op = common_builder.Return();
Node* n19 = graph.NewNode(op, nil, nil, nil);
USE(n19);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n8 = graph.NewNode(op, nil, nil, nil);
USE(n8);
op = common_builder.Parameter(0);
@@ -873,7 +866,7 @@ TEST(BuildScheduleSimpleLoop) {
Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n12);
n12->ReplaceInput(0, n8);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n9 = graph.NewNode(op, nil, nil, nil);
USE(n9);
op = common_builder.Parameter(0);
@@ -898,7 +891,7 @@ TEST(BuildScheduleSimpleLoop) {
n9->ReplaceInput(2, n6);
n12->ReplaceInput(1, n9);
n12->ReplaceInput(2, n5);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n10 = graph.NewNode(op, nil, nil, nil);
USE(n10);
n10->ReplaceInput(0, n0);
@@ -930,13 +923,7 @@ TEST(BuildScheduleSimpleLoop) {
graph.SetStart(n0);
graph.SetEnd(n20);
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
- CHECK_EQ(19, GetScheduledNodeCount(schedule));
+ ComputeAndVerifySchedule(19, &graph);
}
@@ -946,12 +933,11 @@ TEST(BuildScheduleComplexLoops) {
Graph graph(scope.main_zone());
CommonOperatorBuilder common_builder(scope.main_zone());
JSOperatorBuilder js_builder(scope.main_zone());
- Operator* op;
+ const Operator* op;
Handle<Object> object =
Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> unique_constant =
- PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+ Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
// Manually transcripted code for:
// function turbo_fan_test(a, b, c) {
@@ -976,17 +962,17 @@ TEST(BuildScheduleComplexLoops) {
op = common_builder.Return();
Node* n45 = graph.NewNode(op, nil, nil, nil);
USE(n45);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n35 = graph.NewNode(op, nil, nil, nil);
USE(n35);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n9 = graph.NewNode(op, nil, nil, nil);
USE(n9);
op = common_builder.Parameter(0);
Node* n2 = graph.NewNode(op, n0);
USE(n2);
n9->ReplaceInput(0, n2);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n23 = graph.NewNode(op, nil, nil, nil);
USE(n23);
op = js_builder.Add();
@@ -1004,14 +990,14 @@ TEST(BuildScheduleComplexLoops) {
Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n14);
n14->ReplaceInput(0, n9);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n10 = graph.NewNode(op, nil, nil, nil);
USE(n10);
op = common_builder.Parameter(0);
Node* n3 = graph.NewNode(op, n0);
USE(n3);
n10->ReplaceInput(0, n3);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n24 = graph.NewNode(op, nil, nil, nil);
USE(n24);
n24->ReplaceInput(0, n10);
@@ -1039,10 +1025,10 @@ TEST(BuildScheduleComplexLoops) {
op = js_builder.LessThan();
Node* n27 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n27);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n25 = graph.NewNode(op, nil, nil, nil);
USE(n25);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n11 = graph.NewNode(op, nil, nil, nil);
USE(n11);
op = common_builder.Parameter(0);
@@ -1079,7 +1065,7 @@ TEST(BuildScheduleComplexLoops) {
n27->ReplaceInput(0, n25);
n27->ReplaceInput(1, n24);
n27->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n26 = graph.NewNode(op, nil, nil, nil);
USE(n26);
n26->ReplaceInput(0, n20);
@@ -1100,7 +1086,7 @@ TEST(BuildScheduleComplexLoops) {
n10->ReplaceInput(2, n7);
n14->ReplaceInput(1, n10);
n14->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n12 = graph.NewNode(op, nil, nil, nil);
USE(n12);
n12->ReplaceInput(0, n0);
@@ -1134,7 +1120,7 @@ TEST(BuildScheduleComplexLoops) {
Node* n39 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n39);
n39->ReplaceInput(0, n35);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n36 = graph.NewNode(op, nil, nil, nil);
USE(n36);
n36->ReplaceInput(0, n10);
@@ -1160,7 +1146,7 @@ TEST(BuildScheduleComplexLoops) {
n36->ReplaceInput(2, n33);
n39->ReplaceInput(1, n36);
n39->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n38 = graph.NewNode(op, nil, nil, nil);
USE(n38);
n38->ReplaceInput(0, n14);
@@ -1184,13 +1170,7 @@ TEST(BuildScheduleComplexLoops) {
graph.SetStart(n0);
graph.SetEnd(n46);
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
- CHECK_EQ(46, GetScheduledNodeCount(schedule));
+ ComputeAndVerifySchedule(46, &graph);
}
@@ -1200,12 +1180,11 @@ TEST(BuildScheduleBreakAndContinue) {
Graph graph(scope.main_zone());
CommonOperatorBuilder common_builder(scope.main_zone());
JSOperatorBuilder js_builder(scope.main_zone());
- Operator* op;
+ const Operator* op;
Handle<Object> object =
Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> unique_constant =
- PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+ Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
// Manually transcripted code for:
// function turbo_fan_test(a, b, c) {
@@ -1235,14 +1214,14 @@ TEST(BuildScheduleBreakAndContinue) {
op = js_builder.Add();
Node* n56 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n56);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n10 = graph.NewNode(op, nil, nil, nil);
USE(n10);
op = common_builder.Parameter(0);
Node* n2 = graph.NewNode(op, n0);
USE(n2);
n10->ReplaceInput(0, n2);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n25 = graph.NewNode(op, nil, nil, nil);
USE(n25);
op = js_builder.Add();
@@ -1260,14 +1239,14 @@ TEST(BuildScheduleBreakAndContinue) {
Node* n16 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n16);
n16->ReplaceInput(0, n10);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n11 = graph.NewNode(op, nil, nil, nil);
USE(n11);
op = common_builder.Parameter(0);
Node* n3 = graph.NewNode(op, n0);
USE(n3);
n11->ReplaceInput(0, n3);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n26 = graph.NewNode(op, nil, nil, nil);
USE(n26);
n26->ReplaceInput(0, n11);
@@ -1304,23 +1283,23 @@ TEST(BuildScheduleBreakAndContinue) {
USE(n46);
n47->ReplaceInput(1, n46);
n47->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n42 = graph.NewNode(op, nil, nil, nil);
USE(n42);
op = js_builder.LessThan();
Node* n30 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n30);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n27 = graph.NewNode(op, nil, nil, nil);
USE(n27);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n12 = graph.NewNode(op, nil, nil, nil);
USE(n12);
op = common_builder.Parameter(0);
Node* n4 = graph.NewNode(op, n0);
USE(n4);
n12->ReplaceInput(0, n4);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n41 = graph.NewNode(op, nil, nil, nil);
USE(n41);
n41->ReplaceInput(0, n27);
@@ -1369,17 +1348,17 @@ TEST(BuildScheduleBreakAndContinue) {
op = js_builder.Equal();
Node* n37 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n37);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n28 = graph.NewNode(op, nil, nil, nil);
USE(n28);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n13 = graph.NewNode(op, nil, nil, nil);
USE(n13);
op = common_builder.NumberConstant(0);
Node* n7 = graph.NewNode(op);
USE(n7);
n13->ReplaceInput(0, n7);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n54 = graph.NewNode(op, nil, nil, nil);
USE(n54);
n54->ReplaceInput(0, n28);
@@ -1430,7 +1409,7 @@ TEST(BuildScheduleBreakAndContinue) {
n30->ReplaceInput(0, n27);
n30->ReplaceInput(1, n26);
n30->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n29 = graph.NewNode(op, nil, nil, nil);
USE(n29);
n29->ReplaceInput(0, n22);
@@ -1477,11 +1456,11 @@ TEST(BuildScheduleBreakAndContinue) {
n11->ReplaceInput(2, n8);
n16->ReplaceInput(1, n11);
n16->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n14 = graph.NewNode(op, nil, nil, nil);
USE(n14);
n14->ReplaceInput(0, n0);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n55 = graph.NewNode(op, nil, nil, nil);
USE(n55);
n55->ReplaceInput(0, n47);
@@ -1520,13 +1499,7 @@ TEST(BuildScheduleBreakAndContinue) {
graph.SetStart(n0);
graph.SetEnd(n58);
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
- CHECK_EQ(62, GetScheduledNodeCount(schedule));
+ ComputeAndVerifySchedule(62, &graph);
}
@@ -1536,13 +1509,12 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
Graph graph(scope.main_zone());
CommonOperatorBuilder common_builder(scope.main_zone());
JSOperatorBuilder js_builder(scope.main_zone());
- MachineOperatorBuilder machine_builder(scope.main_zone(), kMachineWord32);
- Operator* op;
+ MachineOperatorBuilder machine_builder;
+ const Operator* op;
Handle<Object> object =
Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> unique_constant =
- PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+ Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
// Manually transcripted code for:
// function turbo_fan_test(a, b, c) {
@@ -1561,7 +1533,7 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
op = common_builder.Return();
Node* n21 = graph.NewNode(op, nil, nil, nil);
USE(n21);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n9 = graph.NewNode(op, nil, nil, nil);
USE(n9);
op = common_builder.Parameter(0);
@@ -1575,7 +1547,7 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
op = machine_builder.Int32Add();
Node* n19 = graph.NewNode(op, nil, nil);
USE(n19);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n10 = graph.NewNode(op, nil, nil, nil);
USE(n10);
op = common_builder.Parameter(0);
@@ -1605,7 +1577,7 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
Node* n6 = graph.NewNode(op);
USE(n6);
n14->ReplaceInput(2, n6);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n12 = graph.NewNode(op, nil, nil, nil);
USE(n12);
n12->ReplaceInput(0, n0);
@@ -1623,7 +1595,7 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
n7->ReplaceInput(1, n17);
n10->ReplaceInput(2, n7);
n19->ReplaceInput(0, n2);
- op = common_builder.Phi(2);
+ op = common_builder.Phi(kMachAnyTagged, 2);
Node* n11 = graph.NewNode(op, nil, nil, nil);
USE(n11);
op = common_builder.Parameter(0);
@@ -1651,14 +1623,7 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
graph.SetStart(n0);
graph.SetEnd(n22);
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
- CHECK_EQ(19, GetScheduledNodeCount(schedule));
-
+ Schedule* schedule = ComputeAndVerifySchedule(19, &graph);
// Make sure the integer-only add gets hoisted to a different block that the
// JSAdd.
CHECK(schedule->block(n19) != schedule->block(n20));
@@ -1667,143 +1632,82 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
#if V8_TURBOFAN_TARGET
-// So we can get a real JS function.
-static Handle<JSFunction> Compile(const char* source) {
- Isolate* isolate = CcTest::i_isolate();
- Handle<String> source_code = isolate->factory()
- ->NewStringFromUtf8(CStrVector(source))
- .ToHandleChecked();
- Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
- source_code, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, NULL,
- v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
- return isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared_function, isolate->native_context());
+static Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common,
+ Node* cond) {
+ Node* tv = graph->NewNode(common->Int32Constant(6));
+ Node* fv = graph->NewNode(common->Int32Constant(7));
+ Node* br = graph->NewNode(common->Branch(), cond, graph->start());
+ Node* t = graph->NewNode(common->IfTrue(), br);
+ Node* f = graph->NewNode(common->IfFalse(), br);
+ Node* m = graph->NewNode(common->Merge(2), t, f);
+ Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), tv, fv, m);
+ return phi;
}
-TEST(BuildScheduleTrivialLazyDeoptCall) {
- FLAG_turbo_deoptimization = true;
+TEST(FloatingDiamond1) {
+ HandleAndZoneScope scope;
+ Graph graph(scope.main_zone());
+ CommonOperatorBuilder common(scope.main_zone());
+
+ Node* start = graph.NewNode(common.Start(1));
+ graph.SetStart(start);
+
+ Node* p0 = graph.NewNode(common.Parameter(0), start);
+ Node* d1 = CreateDiamond(&graph, &common, p0);
+ Node* ret = graph.NewNode(common.Return(), d1, start, start);
+ Node* end = graph.NewNode(common.End(), ret, start);
+
+ graph.SetEnd(end);
+
+ ComputeAndVerifySchedule(13, &graph);
+}
+
+TEST(FloatingDiamond2) {
HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
+ MachineOperatorBuilder machine;
- InitializedHandleScope handles;
- Handle<JSFunction> function = Compile("m()");
- CompilationInfoWithZone info(function);
- Linkage linkage(&info);
+ Node* start = graph.NewNode(common.Start(2));
+ graph.SetStart(start);
- // Manually transcribed code for:
- // function turbo_fan_test() {
- // m();
- // }
- // where m can lazy deopt (so it has a deopt block associated with it).
-
-
- // Start //
- // ^ //
- // | (EC) //
- // | //
- // /------> Call <--------------\ //
- // / ^ ^ \ //
- // / | | \ undef //
- // / / \ \ ^ //
- // (E) | (C) / \ (C) \ (E) | //
- // | Continuation LazyDeoptimization | | //
- // \___ ^ ^ / | //
- // \ | | ______/ Framestate //
- // undef \ | (VC) | (C) / ^ //
- // \ \ | | / / //
- // Return Deoptimization ----------/ //
- // ^ ^ //
- // \ / //
- // (C) \ / (C) //
- // \ / //
- // Merge //
- // ^ //
- // | //
- // End //
-
- Handle<Object> undef_object =
- Handle<Object>(isolate->heap()->undefined_value(), isolate);
- PrintableUnique<Object> undef_constant =
- PrintableUnique<Object>::CreateUninitialized(scope.main_zone(),
- undef_object);
-
- Node* undef_node = graph.NewNode(common.HeapConstant(undef_constant));
-
- Node* start_node = graph.NewNode(common.Start(0));
-
- CallDescriptor* descriptor = linkage.GetJSCallDescriptor(0);
- Node* call_node = graph.NewNode(common.Call(descriptor),
- undef_node, // function
- undef_node, // context
- start_node, // effect
- start_node); // control
-
- Node* cont_node = graph.NewNode(common.Continuation(), call_node);
- Node* lazy_deopt_node = graph.NewNode(common.LazyDeoptimization(), call_node);
-
- Node* parameters = graph.NewNode(common.StateValues(1), undef_node);
- Node* locals = graph.NewNode(common.StateValues(0));
- Node* stack = graph.NewNode(common.StateValues(0));
-
- Node* state_node = graph.NewNode(common.FrameState(BailoutId(1234)),
- parameters, locals, stack);
-
- Node* return_node = graph.NewNode(common.Return(),
- undef_node, // return value
- call_node, // effect
- cont_node); // control
- Node* deoptimization_node = graph.NewNode(common.Deoptimize(),
- state_node, // deopt environment
- call_node, // effect
- lazy_deopt_node); // control
-
- Node* merge_node =
- graph.NewNode(common.Merge(2), return_node, deoptimization_node);
-
- Node* end_node = graph.NewNode(common.End(), merge_node);
-
- graph.SetStart(start_node);
- graph.SetEnd(end_node);
-
- PrintGraph(&graph);
-
- Schedule* schedule = Scheduler::ComputeSchedule(&graph);
-
- PrintSchedule(schedule);
-
- // Tests:
- // Continuation and deopt have basic blocks.
- BasicBlock* cont_block = schedule->block(cont_node);
- BasicBlock* deopt_block = schedule->block(lazy_deopt_node);
- BasicBlock* call_block = schedule->block(call_node);
- CHECK_NE(NULL, cont_block);
- CHECK_NE(NULL, deopt_block);
- CHECK_NE(NULL, call_block);
- // The basic blocks are different.
- CHECK_NE(cont_block, deopt_block);
- CHECK_NE(cont_block, call_block);
- CHECK_NE(deopt_block, call_block);
- // The call node finishes its own basic block.
- CHECK_EQ(BasicBlock::kCall, call_block->control_);
- CHECK_EQ(call_node, call_block->control_input_);
- // The lazy deopt block is deferred.
- CHECK(deopt_block->deferred_);
- CHECK(!call_block->deferred_);
- CHECK(!cont_block->deferred_);
- // The lazy deopt block contains framestate + bailout (and nothing else).
- CHECK_EQ(deoptimization_node, deopt_block->control_input_);
- CHECK_EQ(5, static_cast<int>(deopt_block->nodes_.size()));
- CHECK_EQ(lazy_deopt_node, deopt_block->nodes_[0]);
- CHECK_EQ(IrOpcode::kStateValues, deopt_block->nodes_[1]->op()->opcode());
- CHECK_EQ(IrOpcode::kStateValues, deopt_block->nodes_[2]->op()->opcode());
- CHECK_EQ(IrOpcode::kStateValues, deopt_block->nodes_[3]->op()->opcode());
- CHECK_EQ(state_node, deopt_block->nodes_[4]);
+ Node* p0 = graph.NewNode(common.Parameter(0), start);
+ Node* p1 = graph.NewNode(common.Parameter(1), start);
+ Node* d1 = CreateDiamond(&graph, &common, p0);
+ Node* d2 = CreateDiamond(&graph, &common, p1);
+ Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+ Node* ret = graph.NewNode(common.Return(), add, start, start);
+ Node* end = graph.NewNode(common.End(), ret, start);
+
+ graph.SetEnd(end);
+
+ ComputeAndVerifySchedule(24, &graph);
+}
+
+
+TEST(FloatingDiamond3) {
+ HandleAndZoneScope scope;
+ Graph graph(scope.main_zone());
+ CommonOperatorBuilder common(scope.main_zone());
+ MachineOperatorBuilder machine;
+
+ Node* start = graph.NewNode(common.Start(2));
+ graph.SetStart(start);
+
+ Node* p0 = graph.NewNode(common.Parameter(0), start);
+ Node* p1 = graph.NewNode(common.Parameter(1), start);
+ Node* d1 = CreateDiamond(&graph, &common, p0);
+ Node* d2 = CreateDiamond(&graph, &common, p1);
+ Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+ Node* d3 = CreateDiamond(&graph, &common, add);
+ Node* ret = graph.NewNode(common.Return(), d3, start, start);
+ Node* end = graph.NewNode(common.End(), ret, start);
+
+ graph.SetEnd(end);
+
+ ComputeAndVerifySchedule(33, &graph);
}
#endif
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 18f4136b90..bafa2d88f4 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -4,6 +4,7 @@
#include <limits>
+#include "src/compiler/access-builder.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph-visualizer.h"
@@ -11,7 +12,6 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-node-factory.h"
#include "src/compiler/typer.h"
#include "src/compiler/verifier.h"
#include "src/execution.h"
@@ -29,19 +29,20 @@ using namespace v8::internal::compiler;
template <typename ReturnType>
class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
public:
- SimplifiedLoweringTester(MachineType p0 = kMachineLast,
- MachineType p1 = kMachineLast,
- MachineType p2 = kMachineLast,
- MachineType p3 = kMachineLast,
- MachineType p4 = kMachineLast)
+ SimplifiedLoweringTester(MachineType p0 = kMachNone,
+ MachineType p1 = kMachNone,
+ MachineType p2 = kMachNone,
+ MachineType p3 = kMachNone,
+ MachineType p4 = kMachNone)
: GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
typer(this->zone()),
- source_positions(this->graph()),
- jsgraph(this->graph(), this->common(), &typer),
- lowering(&jsgraph, &source_positions) {}
+ javascript(this->zone()),
+ jsgraph(this->graph(), this->common(), &javascript, &typer,
+ this->machine()),
+ lowering(&jsgraph) {}
Typer typer;
- SourcePositionTable source_positions;
+ JSOperatorBuilder javascript;
JSGraph jsgraph;
SimplifiedLowering lowering;
@@ -55,47 +56,64 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
};
-// TODO(dcarney): find a home for these functions.
-namespace {
-
-FieldAccess ForJSObjectMap() {
- FieldAccess access = {kTaggedBase, JSObject::kMapOffset, Handle<Name>(),
- Type::Any(), kMachineTagged};
- return access;
-}
-
-
-FieldAccess ForJSObjectProperties() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
- Handle<Name>(), Type::Any(), kMachineTagged};
- return access;
-}
-
+#ifndef V8_TARGET_ARCH_ARM64
+// TODO(titzer): these result in a stub call that doesn't work on ARM64.
+// TODO(titzer): factor these tests out to test-run-simplifiedops.cc.
+// TODO(titzer): test tagged representation for input to NumberToInt32.
+TEST(RunNumberToInt32_float64) {
+ // TODO(titzer): explicit load/stores here are only because of representations
+ double input;
+ int32_t result;
+ SimplifiedLoweringTester<Object*> t;
+ FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
+ kMachFloat64};
+ Node* loaded = t.LoadField(load, t.PointerConstant(&input));
+ Node* convert = t.NumberToInt32(loaded);
+ FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
+ kMachInt32};
+ t.StoreField(store, t.PointerConstant(&result), convert);
+ t.Return(t.jsgraph.TrueConstant());
+ t.LowerAllNodes();
+ t.GenerateCode();
-FieldAccess ForArrayBufferBackingStore() {
- FieldAccess access = {
- kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
- Handle<Name>(), Type::UntaggedPtr(),
- MachineOperatorBuilder::pointer_rep(),
- };
- return access;
+ if (Pipeline::SupportedTarget()) {
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ int32_t expected = DoubleToInt32(*i);
+ t.Call();
+ CHECK_EQ(expected, result);
+ }
+ }
}
-ElementAccess ForFixedArrayElement() {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- kMachineTagged};
- return access;
-}
-
+// TODO(titzer): test tagged representation for input to NumberToUint32.
+TEST(RunNumberToUint32_float64) {
+ // TODO(titzer): explicit load/stores here are only because of representations
+ double input;
+ uint32_t result;
+ SimplifiedLoweringTester<Object*> t;
+ FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
+ kMachFloat64};
+ Node* loaded = t.LoadField(load, t.PointerConstant(&input));
+ Node* convert = t.NumberToUint32(loaded);
+ FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
+ kMachUint32};
+ t.StoreField(store, t.PointerConstant(&result), convert);
+ t.Return(t.jsgraph.TrueConstant());
+ t.LowerAllNodes();
+ t.GenerateCode();
-ElementAccess ForBackingStoreElement(MachineType rep) {
- ElementAccess access = {kUntaggedBase,
- kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), rep};
- return access;
-}
+ if (Pipeline::SupportedTarget()) {
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ uint32_t expected = DoubleToUint32(*i);
+ t.Call();
+ CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
+ }
+ }
}
+#endif
// Create a simple JSObject with a unique map.
@@ -108,8 +126,8 @@ static Handle<JSObject> TestObject() {
TEST(RunLoadMap) {
- SimplifiedLoweringTester<Object*> t(kMachineTagged);
- FieldAccess access = ForJSObjectMap();
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ FieldAccess access = AccessBuilder::ForMap();
Node* load = t.LoadField(access, t.Parameter(0));
t.Return(load);
@@ -126,8 +144,8 @@ TEST(RunLoadMap) {
TEST(RunStoreMap) {
- SimplifiedLoweringTester<int32_t> t(kMachineTagged, kMachineTagged);
- FieldAccess access = ForJSObjectMap();
+ SimplifiedLoweringTester<int32_t> t(kMachAnyTagged, kMachAnyTagged);
+ FieldAccess access = AccessBuilder::ForMap();
t.StoreField(access, t.Parameter(1), t.Parameter(0));
t.Return(t.jsgraph.TrueConstant());
@@ -146,8 +164,8 @@ TEST(RunStoreMap) {
TEST(RunLoadProperties) {
- SimplifiedLoweringTester<Object*> t(kMachineTagged);
- FieldAccess access = ForJSObjectProperties();
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ FieldAccess access = AccessBuilder::ForJSObjectProperties();
Node* load = t.LoadField(access, t.Parameter(0));
t.Return(load);
@@ -164,8 +182,8 @@ TEST(RunLoadProperties) {
TEST(RunLoadStoreMap) {
- SimplifiedLoweringTester<Object*> t(kMachineTagged, kMachineTagged);
- FieldAccess access = ForJSObjectMap();
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged, kMachAnyTagged);
+ FieldAccess access = AccessBuilder::ForMap();
Node* load = t.LoadField(access, t.Parameter(0));
t.StoreField(access, t.Parameter(1), load);
t.Return(load);
@@ -187,10 +205,12 @@ TEST(RunLoadStoreMap) {
TEST(RunLoadStoreFixedArrayIndex) {
- SimplifiedLoweringTester<Object*> t(kMachineTagged);
- ElementAccess access = ForFixedArrayElement();
- Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
- t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ ElementAccess access = AccessBuilder::ForFixedArrayElement();
+ Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0),
+ t.Int32Constant(2));
+ t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), t.Int32Constant(2),
+ load);
t.Return(load);
t.LowerAllNodes();
@@ -211,15 +231,18 @@ TEST(RunLoadStoreFixedArrayIndex) {
TEST(RunLoadStoreArrayBuffer) {
- SimplifiedLoweringTester<Object*> t(kMachineTagged);
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
const int index = 12;
- ElementAccess buffer_access = ForBackingStoreElement(kMachineWord8);
- Node* backing_store =
- t.LoadField(ForArrayBufferBackingStore(), t.Parameter(0));
+ const int array_length = 2 * index;
+ ElementAccess buffer_access =
+ AccessBuilder::ForTypedArrayElement(v8::kExternalInt8Array, true);
+ Node* backing_store = t.LoadField(
+ AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
Node* load =
- t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
+ t.LoadElement(buffer_access, backing_store, t.Int32Constant(index),
+ t.Int32Constant(array_length));
t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
- load);
+ t.Int32Constant(array_length), load);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodes();
@@ -227,7 +250,6 @@ TEST(RunLoadStoreArrayBuffer) {
if (Pipeline::SupportedTarget()) {
Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
- const int array_length = 2 * index;
Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
for (int i = 0; i < array_length; i++) {
@@ -249,10 +271,10 @@ TEST(RunLoadStoreArrayBuffer) {
TEST(RunLoadFieldFromUntaggedBase) {
Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
- for (size_t i = 0; i < ARRAY_SIZE(smis); i++) {
+ for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), kMachineTagged};
+ Type::Integral32(), kMachAnyTagged};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadField(access, t.PointerConstant(smis));
@@ -273,12 +295,12 @@ TEST(RunLoadFieldFromUntaggedBase) {
TEST(RunStoreFieldToUntaggedBase) {
Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
- for (size_t i = 0; i < ARRAY_SIZE(smis); i++) {
+ for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), kMachineTagged};
+ Type::Integral32(), kMachAnyTagged};
- SimplifiedLoweringTester<Object*> t(kMachineTagged);
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
Node* p0 = t.Parameter(0);
t.StoreField(access, t.PointerConstant(smis), p0);
t.Return(p0);
@@ -300,15 +322,16 @@ TEST(RunLoadElementFromUntaggedBase) {
Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
Smi::FromInt(4), Smi::FromInt(5)};
- for (size_t i = 0; i < ARRAY_SIZE(smis); i++) { // for header sizes
- for (size_t j = 0; (i + j) < ARRAY_SIZE(smis); j++) { // for element index
+ for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
+ for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- kMachineTagged};
+ kMachAnyTagged};
SimplifiedLoweringTester<Object*> t;
- Node* load = t.LoadElement(access, t.PointerConstant(smis),
- t.Int32Constant(static_cast<int>(j)));
+ Node* load = t.LoadElement(
+ access, t.PointerConstant(smis), t.Int32Constant(static_cast<int>(j)),
+ t.Int32Constant(static_cast<int>(arraysize(smis))));
t.Return(load);
t.LowerAllNodes();
@@ -328,16 +351,17 @@ TEST(RunStoreElementFromUntaggedBase) {
Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
Smi::FromInt(4), Smi::FromInt(5)};
- for (size_t i = 0; i < ARRAY_SIZE(smis); i++) { // for header sizes
- for (size_t j = 0; (i + j) < ARRAY_SIZE(smis); j++) { // for element index
+ for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
+ for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- kMachineTagged};
+ kMachAnyTagged};
- SimplifiedLoweringTester<Object*> t(kMachineTagged);
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
Node* p0 = t.Parameter(0);
t.StoreElement(access, t.PointerConstant(smis),
- t.Int32Constant(static_cast<int>(j)), p0);
+ t.Int32Constant(static_cast<int>(j)),
+ t.Int32Constant(static_cast<int>(arraysize(smis))), p0);
t.Return(p0);
t.LowerAllNodes();
@@ -403,8 +427,10 @@ class AccessTester : public HandleAndZoneScope {
SimplifiedLoweringTester<Object*> t;
Node* ptr = GetBaseNode(&t);
- Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
- t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
+ Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index),
+ t.Int32Constant(static_cast<int>(num_elements)));
+ t.StoreElement(access, ptr, t.Int32Constant(to_index),
+ t.Int32Constant(static_cast<int>(num_elements)), load);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodes();
t.GenerateCode();
@@ -439,6 +465,8 @@ class AccessTester : public HandleAndZoneScope {
// Create and run code that copies the elements from {this} to {that}.
void RunCopyElements(AccessTester<E>* that) {
+// TODO(titzer): Rewrite this test without StructuredGraphBuilder support.
+#if 0
SimplifiedLoweringTester<Object*> t;
Node* one = t.Int32Constant(1);
@@ -474,6 +502,7 @@ class AccessTester : public HandleAndZoneScope {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
}
+#endif
}
E GetElement(int index) {
@@ -559,19 +588,19 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
TEST(RunAccessTests_uint8) {
uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
- RunAccessTest<uint8_t>(kMachineWord8, data, ARRAY_SIZE(data));
+ RunAccessTest<uint8_t>(kMachInt8, data, arraysize(data));
}
TEST(RunAccessTests_uint16) {
uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
- RunAccessTest<uint16_t>(kMachineWord16, data, ARRAY_SIZE(data));
+ RunAccessTest<uint16_t>(kMachInt16, data, arraysize(data));
}
TEST(RunAccessTests_int32) {
int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
- RunAccessTest<int32_t>(kMachineWord32, data, ARRAY_SIZE(data));
+ RunAccessTest<int32_t>(kMachInt32, data, arraysize(data));
}
@@ -585,13 +614,13 @@ TEST(RunAccessTests_int64) {
V8_2PART_INT64(0x30313233, 34353637),
V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
- RunAccessTest<int64_t>(kMachineWord64, data, ARRAY_SIZE(data));
+ RunAccessTest<int64_t>(kMachInt64, data, arraysize(data));
}
TEST(RunAccessTests_float64) {
double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
- RunAccessTest<double>(kMachineFloat64, data, ARRAY_SIZE(data));
+ RunAccessTest<double>(kMachFloat64, data, arraysize(data));
}
@@ -599,7 +628,7 @@ TEST(RunAccessTests_Smi) {
Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
Smi::FromInt(0), Smi::FromInt(666),
Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
- RunAccessTest<Smi*>(kMachineTagged, data, ARRAY_SIZE(data));
+ RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
}
@@ -607,17 +636,21 @@ TEST(RunAccessTests_Smi) {
class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
public:
Typer typer;
+ JSOperatorBuilder javascript;
JSGraph jsgraph;
Node* p0;
Node* p1;
+ Node* p2;
Node* start;
Node* end;
Node* ret;
- explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None())
+ explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
+ Type* p2_type = Type::None())
: GraphAndBuilders(main_zone()),
typer(main_zone()),
- jsgraph(graph(), common(), &typer) {
+ javascript(main_zone()),
+ jsgraph(graph(), common(), &javascript, &typer, machine()) {
start = graph()->NewNode(common()->Start(2));
graph()->SetStart(start);
ret =
@@ -626,18 +659,20 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
graph()->SetEnd(end);
p0 = graph()->NewNode(common()->Parameter(0), start);
p1 = graph()->NewNode(common()->Parameter(1), start);
+ p2 = graph()->NewNode(common()->Parameter(2), start);
NodeProperties::SetBounds(p0, Bounds(p0_type));
NodeProperties::SetBounds(p1, Bounds(p1_type));
+ NodeProperties::SetBounds(p2, Bounds(p2_type));
}
- void CheckLoweringBinop(IrOpcode::Value expected, Operator* op) {
+ void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
Node* node = Return(graph()->NewNode(op, p0, p1));
Lower();
CHECK_EQ(expected, node->opcode());
}
- void CheckLoweringTruncatedBinop(IrOpcode::Value expected, Operator* op,
- Operator* trunc) {
+ void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
+ const Operator* trunc) {
Node* node = graph()->NewNode(op, p0, p1);
Return(graph()->NewNode(trunc, node));
Lower();
@@ -645,7 +680,7 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
}
void Lower() {
- SimplifiedLowering lowering(&jsgraph, NULL);
+ SimplifiedLowering lowering(&jsgraph);
lowering.LowerAllNodes();
}
@@ -658,42 +693,42 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
// Inserts the node as the effect input to the return of the graph.
void Effect(Node* node) { ret->ReplaceInput(1, node); }
- Node* ExampleWithOutput(RepType type) {
+ Node* ExampleWithOutput(MachineType type) {
// TODO(titzer): use parameters with guaranteed representations.
- if (type & tInt32) {
+ if (type & kTypeInt32) {
return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
jsgraph.Int32Constant(1));
- } else if (type & tUint32) {
+ } else if (type & kTypeUint32) {
return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
jsgraph.Int32Constant(1));
- } else if (type & rFloat64) {
+ } else if (type & kRepFloat64) {
return graph()->NewNode(machine()->Float64Add(),
jsgraph.Float64Constant(1),
jsgraph.Float64Constant(1));
- } else if (type & rBit) {
+ } else if (type & kRepBit) {
return graph()->NewNode(machine()->Word32Equal(),
jsgraph.Int32Constant(1),
jsgraph.Int32Constant(1));
- } else if (type & rWord64) {
+ } else if (type & kRepWord64) {
return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
Int64Constant(1));
} else {
- CHECK(type & rTagged);
+ CHECK(type & kRepTagged);
return p0;
}
}
- Node* Use(Node* node, RepType type) {
- if (type & tInt32) {
+ Node* Use(Node* node, MachineType type) {
+ if (type & kTypeInt32) {
return graph()->NewNode(machine()->Int32LessThan(), node,
jsgraph.Int32Constant(1));
- } else if (type & tUint32) {
+ } else if (type & kTypeUint32) {
return graph()->NewNode(machine()->Uint32LessThan(), node,
jsgraph.Int32Constant(1));
- } else if (type & rFloat64) {
+ } else if (type & kRepFloat64) {
return graph()->NewNode(machine()->Float64Add(), node,
jsgraph.Float64Constant(1));
- } else if (type & rWord64) {
+ } else if (type & kRepWord64) {
return graph()->NewNode(machine()->Int64LessThan(), node,
Int64Constant(1));
} else {
@@ -723,9 +758,9 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
TEST(LowerBooleanNot_bit_bit) {
- // BooleanNot(x: rBit) used as rBit
+ // BooleanNot(x: kRepBit) used as kRepBit
TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(rBit);
+ Node* b = t.ExampleWithOutput(kRepBit);
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
Node* use = t.Branch(inv);
t.Lower();
@@ -738,11 +773,11 @@ TEST(LowerBooleanNot_bit_bit) {
TEST(LowerBooleanNot_bit_tagged) {
- // BooleanNot(x: rBit) used as rTagged
+ // BooleanNot(x: kRepBit) used as kRepTagged
TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(rBit);
+ Node* b = t.ExampleWithOutput(kRepBit);
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Use(inv, rTagged);
+ Node* use = t.Use(inv, kRepTagged);
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
@@ -755,7 +790,7 @@ TEST(LowerBooleanNot_bit_tagged) {
TEST(LowerBooleanNot_tagged_bit) {
- // BooleanNot(x: rTagged) used as rBit
+ // BooleanNot(x: kRepTagged) used as kRepBit
TestingGraph t(Type::Boolean());
Node* b = t.p0;
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
@@ -770,11 +805,11 @@ TEST(LowerBooleanNot_tagged_bit) {
TEST(LowerBooleanNot_tagged_tagged) {
- // BooleanNot(x: rTagged) used as rTagged
+ // BooleanNot(x: kRepTagged) used as kRepTagged
TestingGraph t(Type::Boolean());
Node* b = t.p0;
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Use(inv, rTagged);
+ Node* use = t.Use(inv, kRepTagged);
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
@@ -786,6 +821,63 @@ TEST(LowerBooleanNot_tagged_tagged) {
}
+TEST(LowerBooleanToNumber_bit_int32) {
+ // BooleanToNumber(x: kRepBit) used as kMachInt32
+ TestingGraph t(Type::Boolean());
+ Node* b = t.ExampleWithOutput(kRepBit);
+ Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+ Node* use = t.Use(cnv, kMachInt32);
+ t.Return(use);
+ t.Lower();
+ CHECK_EQ(b, use->InputAt(0));
+}
+
+
+TEST(LowerBooleanToNumber_tagged_int32) {
+ // BooleanToNumber(x: kRepTagged) used as kMachInt32
+ TestingGraph t(Type::Boolean());
+ Node* b = t.p0;
+ Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+ Node* use = t.Use(cnv, kMachInt32);
+ t.Return(use);
+ t.Lower();
+ CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
+ CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
+ Node* c = t.jsgraph.TrueConstant();
+ CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
+}
+
+
+TEST(LowerBooleanToNumber_bit_tagged) {
+ // BooleanToNumber(x: kRepBit) used as kMachAnyTagged
+ TestingGraph t(Type::Boolean());
+ Node* b = t.ExampleWithOutput(kRepBit);
+ Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+ Node* use = t.Use(cnv, kMachAnyTagged);
+ t.Return(use);
+ t.Lower();
+ CHECK_EQ(b, use->InputAt(0)->InputAt(0));
+ CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
+}
+
+
+TEST(LowerBooleanToNumber_tagged_tagged) {
+ // BooleanToNumber(x: kRepTagged) used as kMachAnyTagged
+ TestingGraph t(Type::Boolean());
+ Node* b = t.p0;
+ Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+ Node* use = t.Use(cnv, kMachAnyTagged);
+ t.Return(use);
+ t.Lower();
+ CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
+ CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
+ CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
+ CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
+ Node* c = t.jsgraph.TrueConstant();
+ CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
+}
+
+
static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
Type::Number(), Type::Any()};
@@ -815,7 +907,7 @@ TEST(LowerNumberCmp_to_uint32) {
TEST(LowerNumberCmp_to_float64) {
static Type* types[] = {Type::Number(), Type::Any()};
- for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+ for (size_t i = 0; i < arraysize(types); i++) {
TestingGraph t(types[i], types[i]);
t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
@@ -851,7 +943,7 @@ TEST(LowerNumberAddSub_to_uint32) {
TEST(LowerNumberAddSub_to_float64) {
- for (size_t i = 0; i < ARRAY_SIZE(test_types); i++) {
+ for (size_t i = 0; i < arraysize(test_types); i++) {
TestingGraph t(test_types[i], test_types[i]);
t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
@@ -862,7 +954,7 @@ TEST(LowerNumberAddSub_to_float64) {
TEST(LowerNumberDivMod_to_float64) {
- for (size_t i = 0; i < ARRAY_SIZE(test_types); i++) {
+ for (size_t i = 0; i < arraysize(test_types); i++) {
TestingGraph t(test_types[i], test_types[i]);
t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
@@ -879,10 +971,10 @@ static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
TEST(LowerNumberToInt32_to_nop) {
- // NumberToInt32(x: rTagged | tInt32) used as rTagged
+ // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepTagged
TestingGraph t(Type::Signed32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, rTagged);
+ Node* use = t.Use(trunc, kRepTagged);
t.Return(use);
t.Lower();
CHECK_EQ(t.p0, use->InputAt(0));
@@ -890,10 +982,10 @@ TEST(LowerNumberToInt32_to_nop) {
TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
- // NumberToInt32(x: rTagged | tInt32) used as rFloat64
+ // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepFloat64
TestingGraph t(Type::Signed32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, rFloat64);
+ Node* use = t.Use(trunc, kRepFloat64);
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
@@ -901,36 +993,59 @@ TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
- // NumberToInt32(x: rTagged | tInt32) used as rWord32
+ // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
TestingGraph t(Type::Signed32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, tInt32);
+ Node* use = t.Use(trunc, kTypeInt32);
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
}
-TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
- // TODO(titzer): NumberToInt32(x: rFloat64 | tInt32) used as rTagged
+TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
+ // NumberToInt32(x: kRepFloat64) used as kMachInt32
+ TestingGraph t(Type::Number());
+ Node* p0 = t.ExampleWithOutput(kMachFloat64);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
+ Node* use = t.Use(trunc, kMachInt32);
+ t.Return(use);
+ t.Lower();
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
}
-TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
- // TODO(titzer): NumberToInt32(x: rFloat64 | tInt32) used as rWord32 | tInt32
+TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
+ // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachInt32
+ TestingGraph t(Type::Number());
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+ Node* use = t.Use(trunc, kMachInt32);
+ t.Return(use);
+ t.Lower();
+ Node* node = use->InputAt(0);
+ CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
+ Node* of = node->InputAt(0);
+ CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
+ CHECK_EQ(t.p0, of->InputAt(0));
}
-TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
- // TODO(titzer): NumberToInt32(x: rFloat64) used as rWord32 | tUint32
+TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
+ // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepTagged
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
+ // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepWord32
+ // | kTypeInt32
}
TEST(LowerNumberToUint32_to_nop) {
- // NumberToUint32(x: rTagged | tUint32) used as rTagged
+ // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
TestingGraph t(Type::Unsigned32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, rTagged);
+ Node* use = t.Use(trunc, kRepTagged);
t.Return(use);
t.Lower();
CHECK_EQ(t.p0, use->InputAt(0));
@@ -938,10 +1053,10 @@ TEST(LowerNumberToUint32_to_nop) {
TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
- // NumberToUint32(x: rTagged | tUint32) used as rWord32
+ // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
TestingGraph t(Type::Unsigned32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, rFloat64);
+ Node* use = t.Use(trunc, kRepFloat64);
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
@@ -949,28 +1064,57 @@ TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
- // NumberToUint32(x: rTagged | tUint32) used as rWord32
+ // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
TestingGraph t(Type::Unsigned32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, tUint32);
+ Node* use = t.Use(trunc, kTypeUint32);
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
}
+TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
+ // NumberToUint32(x: kRepFloat64) used as kMachUint32
+ TestingGraph t(Type::Number());
+ Node* p0 = t.ExampleWithOutput(kMachFloat64);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
+ Node* use = t.Use(trunc, kMachUint32);
+ t.Return(use);
+ t.Lower();
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
+ // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachUint32
+ TestingGraph t(Type::Number());
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+ Node* use = t.Use(trunc, kMachUint32);
+ t.Return(use);
+ t.Lower();
+ Node* node = use->InputAt(0);
+ CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
+ Node* of = node->InputAt(0);
+ CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
+ CHECK_EQ(t.p0, of->InputAt(0));
+}
+
+
TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
- // TODO(titzer): NumberToUint32(x: rFloat64 | tUint32) used as rTagged
+ // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
+ // kRepTagged
}
TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
- // TODO(titzer): NumberToUint32(x: rFloat64 | tUint32) used as rWord32
+ // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
+ // kRepWord32
}
TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
- // TODO(titzer): NumberToUint32(x: rFloat64) used as rWord32
+ // TODO(titzer): NumberToUint32(x: kRepFloat64) used as kRepWord32
}
@@ -982,19 +1126,26 @@ TEST(LowerReferenceEqual_to_wordeq) {
}
-TEST(LowerStringOps_to_rtcalls) {
- if (false) { // TODO(titzer): lower StringOps to runtime calls
+TEST(LowerStringOps_to_call_and_compare) {
+ if (Pipeline::SupportedTarget()) {
+ // These tests need linkage for the calls.
TestingGraph t(Type::String(), Type::String());
- t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringEqual());
- t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringLessThan());
- t.CheckLoweringBinop(IrOpcode::kCall,
- t.simplified()->StringLessThanOrEqual());
+ IrOpcode::Value compare_eq =
+ static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
+ IrOpcode::Value compare_lt =
+ static_cast<IrOpcode::Value>(t.machine()->IntLessThan()->opcode());
+ IrOpcode::Value compare_le = static_cast<IrOpcode::Value>(
+ t.machine()->IntLessThanOrEqual()->opcode());
+ t.CheckLoweringBinop(compare_eq, t.simplified()->StringEqual());
+ t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
+ t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringAdd());
}
}
-void CheckChangeInsertion(IrOpcode::Value expected, RepType from, RepType to) {
+void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
+ MachineType to) {
TestingGraph t(Type::Any());
Node* in = t.ExampleWithOutput(from);
Node* use = t.Use(in, to);
@@ -1006,26 +1157,31 @@ void CheckChangeInsertion(IrOpcode::Value expected, RepType from, RepType to) {
TEST(InsertBasicChanges) {
- if (false) {
- // TODO(titzer): these changes need the output to have the right type.
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, rFloat64, tInt32);
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, rFloat64, tUint32);
- CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, rTagged, tInt32);
- CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, rTagged, tUint32);
- }
+ CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, kRepFloat64,
+ kTypeInt32);
+ CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, kRepFloat64,
+ kTypeUint32);
+ CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, kRepTagged, kTypeInt32);
+ CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, kRepTagged,
+ kTypeUint32);
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, rFloat64, rTagged);
- CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, rTagged, rFloat64);
+ CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, kRepFloat64,
+ kRepTagged);
+ CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, kRepTagged,
+ kRepFloat64);
- CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, tInt32, rFloat64);
- CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, tInt32, rTagged);
+ CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, kTypeInt32,
+ kRepFloat64);
+ CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, kTypeInt32, kRepTagged);
- CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, tUint32, rFloat64);
- CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, tUint32, rTagged);
+ CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, kTypeUint32,
+ kRepFloat64);
+ CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, kTypeUint32,
+ kRepTagged);
}
-static void CheckChangesAroundBinop(TestingGraph* t, Operator* op,
+static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
IrOpcode::Value input_change,
IrOpcode::Value output_change) {
Node* binop = t->graph()->NewNode(op, t->p0, t->p1);
@@ -1043,13 +1199,13 @@ static void CheckChangesAroundBinop(TestingGraph* t, Operator* op,
TEST(InsertChangesAroundInt32Binops) {
TestingGraph t(Type::Signed32(), Type::Signed32());
- Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
- t.machine()->Int32Mul(), t.machine()->Int32Div(),
- t.machine()->Int32Mod(), t.machine()->Word32And(),
- t.machine()->Word32Or(), t.machine()->Word32Xor(),
- t.machine()->Word32Shl(), t.machine()->Word32Sar()};
+ const Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
+ t.machine()->Int32Mul(), t.machine()->Int32Div(),
+ t.machine()->Int32Mod(), t.machine()->Word32And(),
+ t.machine()->Word32Or(), t.machine()->Word32Xor(),
+ t.machine()->Word32Shl(), t.machine()->Word32Sar()};
- for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+ for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
IrOpcode::kChangeInt32ToTagged);
}
@@ -1059,10 +1215,10 @@ TEST(InsertChangesAroundInt32Binops) {
TEST(InsertChangesAroundInt32Cmp) {
TestingGraph t(Type::Signed32(), Type::Signed32());
- Operator* ops[] = {t.machine()->Int32LessThan(),
- t.machine()->Int32LessThanOrEqual()};
+ const Operator* ops[] = {t.machine()->Int32LessThan(),
+ t.machine()->Int32LessThanOrEqual()};
- for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+ for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
IrOpcode::kChangeBitToBool);
}
@@ -1072,10 +1228,10 @@ TEST(InsertChangesAroundInt32Cmp) {
TEST(InsertChangesAroundUint32Cmp) {
TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
- Operator* ops[] = {t.machine()->Uint32LessThan(),
- t.machine()->Uint32LessThanOrEqual()};
+ const Operator* ops[] = {t.machine()->Uint32LessThan(),
+ t.machine()->Uint32LessThanOrEqual()};
- for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+ for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
IrOpcode::kChangeBitToBool);
}
@@ -1085,13 +1241,13 @@ TEST(InsertChangesAroundUint32Cmp) {
TEST(InsertChangesAroundFloat64Binops) {
TestingGraph t(Type::Number(), Type::Number());
- Operator* ops[] = {
+ const Operator* ops[] = {
t.machine()->Float64Add(), t.machine()->Float64Sub(),
t.machine()->Float64Mul(), t.machine()->Float64Div(),
t.machine()->Float64Mod(),
};
- for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+ for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
IrOpcode::kChangeFloat64ToTagged);
}
@@ -1101,11 +1257,11 @@ TEST(InsertChangesAroundFloat64Binops) {
TEST(InsertChangesAroundFloat64Cmp) {
TestingGraph t(Type::Number(), Type::Number());
- Operator* ops[] = {t.machine()->Float64Equal(),
- t.machine()->Float64LessThan(),
- t.machine()->Float64LessThanOrEqual()};
+ const Operator* ops[] = {t.machine()->Float64Equal(),
+ t.machine()->Float64LessThan(),
+ t.machine()->Float64LessThanOrEqual()};
- for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+ for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
IrOpcode::kChangeBitToBool);
}
@@ -1123,28 +1279,7 @@ Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
CHECK_EQ(IrOpcode::kInt32Add, index.node()->opcode());
CHECK(index.right().Is(access.header_size - access.tag()));
- int element_size = 0;
- switch (access.representation) {
- case kMachineTagged:
- element_size = kPointerSize;
- break;
- case kMachineWord8:
- element_size = 1;
- break;
- case kMachineWord16:
- element_size = 2;
- break;
- case kMachineWord32:
- element_size = 4;
- break;
- case kMachineWord64:
- case kMachineFloat64:
- element_size = 8;
- break;
- case kMachineLast:
- UNREACHABLE();
- break;
- }
+ int element_size = ElementSizeOf(access.machine_type);
if (element_size != 1) {
Int32BinopMatcher mul(index.left().node());
@@ -1157,30 +1292,21 @@ Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
}
-static const MachineType machine_reps[] = {kMachineWord8, kMachineWord16,
- kMachineWord32, kMachineWord64,
- kMachineFloat64, kMachineTagged};
-
-
-// Representation types corresponding to those above.
-static const RepType rep_types[] = {static_cast<RepType>(rWord32 | tUint32),
- static_cast<RepType>(rWord32 | tUint32),
- static_cast<RepType>(rWord32 | tInt32),
- static_cast<RepType>(rWord64),
- static_cast<RepType>(rFloat64 | tNumber),
- static_cast<RepType>(rTagged | tAny)};
+static const MachineType machine_reps[] = {
+ kRepBit, kMachInt8, kMachInt16, kMachInt32,
+ kMachInt64, kMachFloat64, kMachAnyTagged};
TEST(LowerLoadField_to_load) {
TestingGraph t(Type::Any(), Type::Signed32());
- for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
+ for (size_t i = 0; i < arraysize(machine_reps); i++) {
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), machine_reps[i]};
Node* load =
t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
- Node* use = t.Use(load, rep_types[i]);
+ Node* use = t.Use(load, machine_reps[i]);
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1196,12 +1322,12 @@ TEST(LowerLoadField_to_load) {
TEST(LowerStoreField_to_store) {
TestingGraph t(Type::Any(), Type::Signed32());
- for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
+ for (size_t i = 0; i < arraysize(machine_reps); i++) {
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), machine_reps[i]};
- Node* val = t.ExampleWithOutput(rep_types[i]);
+ Node* val = t.ExampleWithOutput(machine_reps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
val, t.start, t.start);
t.Effect(store);
@@ -1211,10 +1337,10 @@ TEST(LowerStoreField_to_store) {
CheckFieldAccessArithmetic(access, store);
StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
- if (rep_types[i] & rTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind);
+ if (machine_reps[i] & kRepTagged) {
+ CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
- CHECK_EQ(machine_reps[i], rep.rep);
+ CHECK_EQ(machine_reps[i], rep.machine_type());
}
}
@@ -1222,13 +1348,14 @@ TEST(LowerStoreField_to_store) {
TEST(LowerLoadElement_to_load) {
TestingGraph t(Type::Any(), Type::Signed32());
- for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
+ for (size_t i = 0; i < arraysize(machine_reps); i++) {
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Any(), machine_reps[i]};
- Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.start);
- Node* use = t.Use(load, rep_types[i]);
+ Node* load =
+ t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
+ t.jsgraph.Int32Constant(1024), t.start);
+ Node* use = t.Use(load, machine_reps[i]);
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1244,13 +1371,14 @@ TEST(LowerLoadElement_to_load) {
TEST(LowerStoreElement_to_store) {
TestingGraph t(Type::Any(), Type::Signed32());
- for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
+ for (size_t i = 0; i < arraysize(machine_reps); i++) {
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Any(), machine_reps[i]};
- Node* val = t.ExampleWithOutput(rep_types[i]);
+ Node* val = t.ExampleWithOutput(machine_reps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.p1, val, t.start, t.start);
+ t.p1, t.jsgraph.Int32Constant(1024), val,
+ t.start, t.start);
t.Effect(store);
t.Lower();
CHECK_EQ(IrOpcode::kStore, store->opcode());
@@ -1258,23 +1386,23 @@ TEST(LowerStoreElement_to_store) {
CheckElementAccessArithmetic(access, store);
StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
- if (rep_types[i] & rTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind);
+ if (machine_reps[i] & kRepTagged) {
+ CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
- CHECK_EQ(machine_reps[i], rep.rep);
+ CHECK_EQ(machine_reps[i], rep.machine_type());
}
}
TEST(InsertChangeForLoadElementIndex) {
- // LoadElement(obj: Tagged, index: tInt32 | rTagged) =>
+ // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
// Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
- TestingGraph t(Type::Any(), Type::Signed32());
+ TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachineTagged};
+ kMachAnyTagged};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.start);
+ t.p1, t.p2, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1286,14 +1414,14 @@ TEST(InsertChangeForLoadElementIndex) {
TEST(InsertChangeForStoreElementIndex) {
- // StoreElement(obj: Tagged, index: tInt32 | rTagged, val) =>
+ // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
// Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
- TestingGraph t(Type::Any(), Type::Signed32());
+ TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachineTagged};
+ kMachAnyTagged};
Node* store =
- t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
+ t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1, t.p2,
t.jsgraph.TrueConstant(), t.start, t.start);
t.Effect(store);
t.Lower();
@@ -1307,12 +1435,12 @@ TEST(InsertChangeForStoreElementIndex) {
TEST(InsertChangeForLoadElement) {
// TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32());
+ TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachineFloat64};
+ kMachFloat64};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.start);
+ t.p1, t.p1, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1325,7 +1453,7 @@ TEST(InsertChangeForLoadField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineFloat64};
+ Handle<Name>::null(), Type::Any(), kMachFloat64};
Node* load =
t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
@@ -1339,13 +1467,13 @@ TEST(InsertChangeForLoadField) {
TEST(InsertChangeForStoreElement) {
// TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32());
+ TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachineFloat64};
+ kMachFloat64};
- Node* store =
- t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
+ Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
+ t.jsgraph.Int32Constant(0), t.p2, t.p1,
+ t.start, t.start);
t.Effect(store);
t.Lower();
@@ -1359,7 +1487,7 @@ TEST(InsertChangeForStoreField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineFloat64};
+ Handle<Name>::null(), Type::Any(), kMachFloat64};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
@@ -1370,3 +1498,28 @@ TEST(InsertChangeForStoreField) {
CHECK_EQ(t.p0, store->InputAt(0));
CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
}
+
+
+TEST(UpdatePhi) {
+ TestingGraph t(Type::Any(), Type::Signed32());
+ static const MachineType kMachineTypes[] = {kMachInt32, kMachUint32,
+ kMachFloat64};
+
+ for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(), kMachineTypes[i]};
+
+ Node* load0 =
+ t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
+ Node* load1 =
+ t.graph()->NewNode(t.simplified()->LoadField(access), t.p1, t.start);
+ Node* phi = t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), load0,
+ load1, t.start);
+ t.Return(t.Use(phi, kMachineTypes[i]));
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kPhi, phi->opcode());
+ CHECK_EQ(RepresentationOf(kMachineTypes[i]),
+ RepresentationOf(OpParameter<MachineType>(phi)));
+ }
+}
diff --git a/deps/v8/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc b/deps/v8/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc
deleted file mode 100644
index 02232264d9..0000000000
--- a/deps/v8/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc
+++ /dev/null
@@ -1,667 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-#include "src/base/utils/random-number-generator.h"
-#include "test/cctest/compiler/codegen-tester.h"
-
-#if V8_TURBOFAN_TARGET
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-typedef StructuredMachineAssembler::IfBuilder IfBuilder;
-typedef StructuredMachineAssembler::LoopBuilder Loop;
-
-static const int32_t kUninitializedVariableOffset = -1;
-static const int32_t kUninitializedOutput = -1;
-static const int32_t kVerifiedOutput = -2;
-
-static const int32_t kInitalVar = 1013;
-static const int32_t kConjunctionInc = 1069;
-static const int32_t kDisjunctionInc = 1151;
-static const int32_t kThenInc = 1223;
-static const int32_t kElseInc = 1291;
-static const int32_t kIfInc = 1373;
-
-class IfBuilderModel {
- public:
- explicit IfBuilderModel(Zone* zone)
- : zone_(zone),
- variable_offset_(0),
- root_(new (zone_) Node(NULL)),
- current_node_(root_),
- current_expression_(NULL) {}
-
- void If() {
- if (current_node_->else_node != NULL) {
- current_node_ = current_node_->else_node;
- } else if (current_node_->then_node != NULL) {
- current_node_ = current_node_->then_node;
- }
- DCHECK(current_expression_ == NULL);
- current_expression_ = new (zone_) Expression(zone_, NULL);
- current_node_->condition = current_expression_;
- }
- void IfNode() { LastChild()->variable_offset = variable_offset_++; }
-
- void OpenParen() { current_expression_ = LastChild(); }
- void CloseParen() { current_expression_ = current_expression_->parent; }
-
- void And() { NewChild()->conjunction = true; }
- void Or() { NewChild()->disjunction = true; }
-
- void Then() {
- DCHECK(current_expression_ == NULL || current_expression_->parent == NULL);
- current_expression_ = NULL;
- DCHECK(current_node_->then_node == NULL);
- current_node_->then_node = new (zone_) Node(current_node_);
- }
- void Else() {
- DCHECK(current_expression_ == NULL || current_expression_->parent == NULL);
- current_expression_ = NULL;
- DCHECK(current_node_->else_node == NULL);
- current_node_->else_node = new (zone_) Node(current_node_);
- }
- void Return() {
- if (current_node_->else_node != NULL) {
- current_node_->else_node->returns = true;
- } else if (current_node_->then_node != NULL) {
- current_node_->then_node->returns = true;
- } else {
- CHECK(false);
- }
- }
- void End() {}
-
- void Print(std::vector<char>* v) { PrintRecursive(v, root_); }
-
- struct VerificationState {
- int32_t* inputs;
- int32_t* outputs;
- int32_t var;
- };
-
- int32_t Verify(int length, int32_t* inputs, int32_t* outputs) {
- CHECK_EQ(variable_offset_, length);
- // Input/Output verification.
- for (int i = 0; i < length; ++i) {
- CHECK(inputs[i] == 0 || inputs[i] == 1);
- CHECK(outputs[i] == kUninitializedOutput || outputs[i] >= 0);
- }
- // Do verification.
- VerificationState state;
- state.inputs = inputs;
- state.outputs = outputs;
- state.var = kInitalVar;
- VerifyRecursive(root_, &state);
- // Verify all outputs marked.
- for (int i = 0; i < length; ++i) {
- CHECK(outputs[i] == kUninitializedOutput ||
- outputs[i] == kVerifiedOutput);
- }
- return state.var;
- }
-
- private:
- struct Expression;
- typedef std::vector<Expression*, zone_allocator<Expression*> > Expressions;
-
- struct Expression : public ZoneObject {
- Expression(Zone* zone, Expression* p)
- : variable_offset(kUninitializedVariableOffset),
- disjunction(false),
- conjunction(false),
- parent(p),
- children(Expressions::allocator_type(zone)) {}
- int variable_offset;
- bool disjunction;
- bool conjunction;
- Expression* parent;
- Expressions children;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Expression);
- };
-
- struct Node : public ZoneObject {
- explicit Node(Node* p)
- : parent(p),
- condition(NULL),
- then_node(NULL),
- else_node(NULL),
- returns(false) {}
- Node* parent;
- Expression* condition;
- Node* then_node;
- Node* else_node;
- bool returns;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Node);
- };
-
- Expression* LastChild() {
- if (current_expression_->children.empty()) {
- current_expression_->children.push_back(
- new (zone_) Expression(zone_, current_expression_));
- }
- return current_expression_->children.back();
- }
-
- Expression* NewChild() {
- Expression* child = new (zone_) Expression(zone_, current_expression_);
- current_expression_->children.push_back(child);
- return child;
- }
-
- static void PrintRecursive(std::vector<char>* v, Expression* expression) {
- CHECK(expression != NULL);
- if (expression->conjunction) {
- DCHECK(!expression->disjunction);
- v->push_back('&');
- } else if (expression->disjunction) {
- v->push_back('|');
- }
- if (expression->variable_offset != kUninitializedVariableOffset) {
- v->push_back('v');
- }
- Expressions& children = expression->children;
- if (children.empty()) return;
- v->push_back('(');
- for (Expressions::iterator i = children.begin(); i != children.end(); ++i) {
- PrintRecursive(v, *i);
- }
- v->push_back(')');
- }
-
- static void PrintRecursive(std::vector<char>* v, Node* node) {
- // Termination condition.
- if (node->condition == NULL) {
- CHECK(node->then_node == NULL && node->else_node == NULL);
- if (node->returns) v->push_back('r');
- return;
- }
- CHECK(!node->returns);
- v->push_back('i');
- PrintRecursive(v, node->condition);
- if (node->then_node != NULL) {
- v->push_back('t');
- PrintRecursive(v, node->then_node);
- }
- if (node->else_node != NULL) {
- v->push_back('e');
- PrintRecursive(v, node->else_node);
- }
- }
-
- static bool VerifyRecursive(Expression* expression,
- VerificationState* state) {
- bool result = false;
- bool first_iteration = true;
- Expressions& children = expression->children;
- CHECK(!children.empty());
- for (Expressions::iterator i = children.begin(); i != children.end(); ++i) {
- Expression* child = *i;
- // Short circuit evaluation,
- // but mixes of &&s and ||s have weird semantics.
- if ((child->conjunction && !result) || (child->disjunction && result)) {
- continue;
- }
- if (child->conjunction) state->var += kConjunctionInc;
- if (child->disjunction) state->var += kDisjunctionInc;
- bool child_result;
- if (child->variable_offset != kUninitializedVariableOffset) {
- // Verify output
- CHECK_EQ(state->var, state->outputs[child->variable_offset]);
- state->outputs[child->variable_offset] = kVerifiedOutput; // Mark seen.
- child_result = state->inputs[child->variable_offset];
- CHECK(child->children.empty());
- state->var += kIfInc;
- } else {
- child_result = VerifyRecursive(child, state);
- }
- if (child->conjunction) {
- result &= child_result;
- } else if (child->disjunction) {
- result |= child_result;
- } else {
- CHECK(first_iteration);
- result = child_result;
- }
- first_iteration = false;
- }
- return result;
- }
-
- static void VerifyRecursive(Node* node, VerificationState* state) {
- if (node->condition == NULL) return;
- bool result = VerifyRecursive(node->condition, state);
- if (result) {
- if (node->then_node) {
- state->var += kThenInc;
- return VerifyRecursive(node->then_node, state);
- }
- } else {
- if (node->else_node) {
- state->var += kElseInc;
- return VerifyRecursive(node->else_node, state);
- }
- }
- }
-
- Zone* zone_;
- int variable_offset_;
- Node* root_;
- Node* current_node_;
- Expression* current_expression_;
- DISALLOW_COPY_AND_ASSIGN(IfBuilderModel);
-};
-
-
-class IfBuilderGenerator : public StructuredMachineAssemblerTester<int32_t> {
- public:
- IfBuilderGenerator()
- : StructuredMachineAssemblerTester<int32_t>(
- MachineOperatorBuilder::pointer_rep(),
- MachineOperatorBuilder::pointer_rep()),
- var_(NewVariable(Int32Constant(kInitalVar))),
- c_(this),
- m_(this->zone()),
- one_(Int32Constant(1)),
- offset_(0) {}
-
- static void GenerateExpression(v8::base::RandomNumberGenerator* rng,
- std::vector<char>* v, int n_vars) {
- int depth = 1;
- v->push_back('(');
- bool need_if = true;
- bool populated = false;
- while (n_vars != 0) {
- if (need_if) {
- // can nest a paren or do a variable
- if (rng->NextBool()) {
- v->push_back('v');
- n_vars--;
- need_if = false;
- populated = true;
- } else {
- v->push_back('(');
- depth++;
- populated = false;
- }
- } else {
- // can pop, do && or do ||
- int options = 3;
- if (depth == 1 || !populated) {
- options--;
- }
- switch (rng->NextInt(options)) {
- case 0:
- v->push_back('&');
- need_if = true;
- break;
- case 1:
- v->push_back('|');
- need_if = true;
- break;
- case 2:
- v->push_back(')');
- depth--;
- break;
- }
- }
- }
- CHECK(!need_if);
- while (depth != 0) {
- v->push_back(')');
- depth--;
- }
- }
-
- static void GenerateIfThenElse(v8::base::RandomNumberGenerator* rng,
- std::vector<char>* v, int n_ifs,
- int max_exp_length) {
- CHECK_GT(n_ifs, 0);
- CHECK_GT(max_exp_length, 0);
- bool have_env = true;
- bool then_done = false;
- bool else_done = false;
- bool first_iteration = true;
- while (n_ifs != 0) {
- if (have_env) {
- int options = 3;
- if (else_done || first_iteration) { // Don't do else or return
- options -= 2;
- first_iteration = false;
- }
- switch (rng->NextInt(options)) {
- case 0:
- v->push_back('i');
- n_ifs--;
- have_env = false;
- GenerateExpression(rng, v, rng->NextInt(max_exp_length) + 1);
- break;
- case 1:
- v->push_back('r');
- have_env = false;
- break;
- case 2:
- v->push_back('e');
- else_done = true;
- then_done = false;
- break;
- default:
- CHECK(false);
- }
- } else { // Can only do then or else
- int options = 2;
- if (then_done) options--;
- switch (rng->NextInt(options)) {
- case 0:
- v->push_back('e');
- else_done = true;
- then_done = false;
- break;
- case 1:
- v->push_back('t');
- then_done = true;
- else_done = false;
- break;
- default:
- CHECK(false);
- }
- have_env = true;
- }
- }
- // Last instruction must have been an if, can complete it in several ways.
- int options = 2;
- if (then_done && !else_done) options++;
- switch (rng->NextInt(3)) {
- case 0:
- // Do nothing.
- break;
- case 1:
- v->push_back('t');
- switch (rng->NextInt(3)) {
- case 0:
- v->push_back('r');
- break;
- case 1:
- v->push_back('e');
- break;
- case 2:
- v->push_back('e');
- v->push_back('r');
- break;
- default:
- CHECK(false);
- }
- break;
- case 2:
- v->push_back('e');
- if (rng->NextBool()) v->push_back('r');
- break;
- default:
- CHECK(false);
- }
- }
-
- std::string::const_iterator ParseExpression(std::string::const_iterator it,
- std::string::const_iterator end) {
- // Prepare for expression.
- m_.If();
- c_.If();
- int depth = 0;
- for (; it != end; ++it) {
- switch (*it) {
- case 'v':
- m_.IfNode();
- {
- Node* offset = Int32Constant(offset_ * 4);
- Store(kMachineWord32, Parameter(1), offset, var_.Get());
- var_.Set(Int32Add(var_.Get(), Int32Constant(kIfInc)));
- c_.If(Load(kMachineWord32, Parameter(0), offset));
- offset_++;
- }
- break;
- case '&':
- m_.And();
- c_.And();
- var_.Set(Int32Add(var_.Get(), Int32Constant(kConjunctionInc)));
- break;
- case '|':
- m_.Or();
- c_.Or();
- var_.Set(Int32Add(var_.Get(), Int32Constant(kDisjunctionInc)));
- break;
- case '(':
- if (depth != 0) {
- m_.OpenParen();
- c_.OpenParen();
- }
- depth++;
- break;
- case ')':
- depth--;
- if (depth == 0) return it;
- m_.CloseParen();
- c_.CloseParen();
- break;
- default:
- CHECK(false);
- }
- }
- CHECK(false);
- return it;
- }
-
- void ParseIfThenElse(const std::string& str) {
- int n_vars = 0;
- for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
- if (*it == 'v') n_vars++;
- }
- InitializeConstants(n_vars);
- for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
- switch (*it) {
- case 'i': {
- it++;
- CHECK(it != str.end());
- CHECK_EQ('(', *it);
- it = ParseExpression(it, str.end());
- CHECK_EQ(')', *it);
- break;
- }
- case 't':
- m_.Then();
- c_.Then();
- var_.Set(Int32Add(var_.Get(), Int32Constant(kThenInc)));
- break;
- case 'e':
- m_.Else();
- c_.Else();
- var_.Set(Int32Add(var_.Get(), Int32Constant(kElseInc)));
- break;
- case 'r':
- m_.Return();
- Return(var_.Get());
- break;
- default:
- CHECK(false);
- }
- }
- m_.End();
- c_.End();
- Return(var_.Get());
- // Compare generated model to parsed version.
- {
- std::vector<char> v;
- m_.Print(&v);
- std::string m_str(v.begin(), v.end());
- CHECK(m_str == str);
- }
- }
-
- void ParseExpression(const std::string& str) {
- CHECK(inputs_.is_empty());
- std::string wrapped = "i(" + str + ")te";
- ParseIfThenElse(wrapped);
- }
-
- void ParseRandomIfThenElse(v8::base::RandomNumberGenerator* rng, int n_ifs,
- int n_vars) {
- std::vector<char> v;
- GenerateIfThenElse(rng, &v, n_ifs, n_vars);
- std::string str(v.begin(), v.end());
- ParseIfThenElse(str);
- }
-
- void RunRandom(v8::base::RandomNumberGenerator* rng) {
- // TODO(dcarney): permute inputs via model.
- // TODO(dcarney): compute test_cases from n_ifs and n_vars.
- int test_cases = 100;
- for (int test = 0; test < test_cases; test++) {
- Initialize();
- for (int i = 0; i < offset_; i++) {
- inputs_[i] = rng->NextBool();
- }
- DoCall();
- }
- }
-
- void Run(const std::string& str, int32_t expected) {
- Initialize();
- int offset = 0;
- for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
- switch (*it) {
- case 't':
- inputs_[offset++] = 1;
- break;
- case 'f':
- inputs_[offset++] = 0;
- break;
- default:
- CHECK(false);
- }
- }
- CHECK_EQ(offset_, offset);
- // Call.
- int32_t result = DoCall();
- CHECK_EQ(result, expected);
- }
-
- private:
- typedef std::vector<int32_t, zone_allocator<int32_t> > IOVector;
-
- void InitializeConstants(int n_vars) {
- CHECK(inputs_.is_empty());
- inputs_.Reset(new int32_t[n_vars]);
- outputs_.Reset(new int32_t[n_vars]);
- }
-
- void Initialize() {
- for (int i = 0; i < offset_; i++) {
- inputs_[i] = 0;
- outputs_[i] = kUninitializedOutput;
- }
- }
-
- int32_t DoCall() {
- int32_t result = Call(inputs_.get(), outputs_.get());
- int32_t expected = m_.Verify(offset_, inputs_.get(), outputs_.get());
- CHECK_EQ(result, expected);
- return result;
- }
-
- const v8::internal::compiler::Variable var_;
- IfBuilder c_;
- IfBuilderModel m_;
- Node* one_;
- int32_t offset_;
- SmartArrayPointer<int32_t> inputs_;
- SmartArrayPointer<int32_t> outputs_;
-};
-
-
-TEST(RunExpressionString) {
- IfBuilderGenerator m;
- m.ParseExpression("((v|v)|v)");
- m.Run("ttt", kInitalVar + 1 * kIfInc + kThenInc);
- m.Run("ftt", kInitalVar + 2 * kIfInc + kDisjunctionInc + kThenInc);
- m.Run("fft", kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kThenInc);
- m.Run("fff", kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kElseInc);
-}
-
-
-TEST(RunExpressionStrings) {
- const char* strings[] = {
- "v", "(v)", "((v))", "v|v",
- "(v|v)", "((v|v))", "v&v", "(v&v)",
- "((v&v))", "v&(v)", "v&(v|v)", "v&(v|v)&v",
- "v|(v)", "v|(v&v)", "v|(v&v)|v", "v|(((v)|(v&v)|(v)|v)&(v))|v",
- };
- v8::base::RandomNumberGenerator rng;
- for (size_t i = 0; i < ARRAY_SIZE(strings); i++) {
- IfBuilderGenerator m;
- m.ParseExpression(strings[i]);
- m.RunRandom(&rng);
- }
-}
-
-
-TEST(RunSimpleIfElseTester) {
- const char* tests[] = {
- "i(v)", "i(v)t", "i(v)te",
- "i(v)er", "i(v)ter", "i(v)ti(v)trei(v)ei(v)ei(v)ei(v)ei(v)ei(v)ei(v)e"};
- v8::base::RandomNumberGenerator rng;
- for (size_t i = 0; i < ARRAY_SIZE(tests); ++i) {
- IfBuilderGenerator m;
- m.ParseIfThenElse(tests[i]);
- m.RunRandom(&rng);
- }
-}
-
-
-TEST(RunRandomExpressions) {
- v8::base::RandomNumberGenerator rng;
- for (int n_vars = 1; n_vars < 12; n_vars++) {
- for (int i = 0; i < n_vars * n_vars + 10; i++) {
- IfBuilderGenerator m;
- m.ParseRandomIfThenElse(&rng, 1, n_vars);
- m.RunRandom(&rng);
- }
- }
-}
-
-
-TEST(RunRandomIfElse) {
- v8::base::RandomNumberGenerator rng;
- for (int n_ifs = 1; n_ifs < 12; n_ifs++) {
- for (int i = 0; i < n_ifs * n_ifs + 10; i++) {
- IfBuilderGenerator m;
- m.ParseRandomIfThenElse(&rng, n_ifs, 1);
- m.RunRandom(&rng);
- }
- }
-}
-
-
-TEST(RunRandomIfElseExpressions) {
- v8::base::RandomNumberGenerator rng;
- for (int n_vars = 2; n_vars < 6; n_vars++) {
- for (int n_ifs = 2; n_ifs < 7; n_ifs++) {
- for (int i = 0; i < n_ifs * n_vars + 10; i++) {
- IfBuilderGenerator m;
- m.ParseRandomIfThenElse(&rng, n_ifs, n_vars);
- m.RunRandom(&rng);
- }
- }
- }
-}
-
-#endif
diff --git a/deps/v8/test/cctest/compiler/test-structured-machine-assembler.cc b/deps/v8/test/cctest/compiler/test-structured-machine-assembler.cc
deleted file mode 100644
index 6d8020baf4..0000000000
--- a/deps/v8/test/cctest/compiler/test-structured-machine-assembler.cc
+++ /dev/null
@@ -1,1055 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-#include "src/base/utils/random-number-generator.h"
-#include "src/compiler/structured-machine-assembler.h"
-#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/value-helper.h"
-
-#if V8_TURBOFAN_TARGET
-
-using namespace v8::internal::compiler;
-
-typedef StructuredMachineAssembler::IfBuilder IfBuilder;
-typedef StructuredMachineAssembler::LoopBuilder Loop;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class StructuredMachineAssemblerFriend {
- public:
- static bool VariableAlive(StructuredMachineAssembler* m,
- const Variable& var) {
- CHECK(m->current_environment_ != NULL);
- int offset = var.offset_;
- return offset < static_cast<int>(m->CurrentVars()->size()) &&
- m->CurrentVars()->at(offset) != NULL;
- }
-};
-}
-}
-} // namespace v8::internal::compiler
-
-
-TEST(RunVariable) {
- StructuredMachineAssemblerTester<int32_t> m;
-
- int32_t constant = 0x86c2bb16;
-
- Variable v1 = m.NewVariable(m.Int32Constant(constant));
- Variable v2 = m.NewVariable(v1.Get());
- m.Return(v2.Get());
-
- CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunSimpleIf) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0xc4a3e3a6;
- {
- IfBuilder cond(&m);
- cond.If(m.Parameter(0)).Then();
- m.Return(m.Int32Constant(constant));
- }
- m.Return(m.Word32Not(m.Int32Constant(constant)));
-
- CHECK_EQ(~constant, m.Call(0));
- CHECK_EQ(constant, m.Call(1));
-}
-
-
-TEST(RunSimpleIfVariable) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0xdb6f20c2;
- Variable var = m.NewVariable(m.Int32Constant(constant));
- {
- IfBuilder cond(&m);
- cond.If(m.Parameter(0)).Then();
- var.Set(m.Word32Not(var.Get()));
- }
- m.Return(var.Get());
-
- CHECK_EQ(constant, m.Call(0));
- CHECK_EQ(~constant, m.Call(1));
-}
-
-
-TEST(RunSimpleElse) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0xfc5eadf4;
- {
- IfBuilder cond(&m);
- cond.If(m.Parameter(0)).Else();
- m.Return(m.Int32Constant(constant));
- }
- m.Return(m.Word32Not(m.Int32Constant(constant)));
-
- CHECK_EQ(constant, m.Call(0));
- CHECK_EQ(~constant, m.Call(1));
-}
-
-
-TEST(RunSimpleIfElse) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0xaa9c8cd3;
- {
- IfBuilder cond(&m);
- cond.If(m.Parameter(0)).Then();
- m.Return(m.Int32Constant(constant));
- cond.Else();
- m.Return(m.Word32Not(m.Int32Constant(constant)));
- }
-
- CHECK_EQ(~constant, m.Call(0));
- CHECK_EQ(constant, m.Call(1));
-}
-
-
-TEST(RunSimpleIfElseVariable) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0x67b6f39c;
- Variable var = m.NewVariable(m.Int32Constant(constant));
- {
- IfBuilder cond(&m);
- cond.If(m.Parameter(0)).Then();
- var.Set(m.Word32Not(m.Word32Not(var.Get())));
- cond.Else();
- var.Set(m.Word32Not(var.Get()));
- }
- m.Return(var.Get());
-
- CHECK_EQ(~constant, m.Call(0));
- CHECK_EQ(constant, m.Call(1));
-}
-
-
-TEST(RunSimpleIfNoThenElse) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0xd5e550ed;
- {
- IfBuilder cond(&m);
- cond.If(m.Parameter(0));
- }
- m.Return(m.Int32Constant(constant));
-
- CHECK_EQ(constant, m.Call(0));
- CHECK_EQ(constant, m.Call(1));
-}
-
-
-TEST(RunSimpleConjunctionVariable) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0xf8fb9ec6;
- Variable var = m.NewVariable(m.Int32Constant(constant));
- {
- IfBuilder cond(&m);
- cond.If(m.Int32Constant(1)).And();
- var.Set(m.Word32Not(var.Get()));
- cond.If(m.Parameter(0)).Then();
- var.Set(m.Word32Not(m.Word32Not(var.Get())));
- cond.Else();
- var.Set(m.Word32Not(var.Get()));
- }
- m.Return(var.Get());
-
- CHECK_EQ(constant, m.Call(0));
- CHECK_EQ(~constant, m.Call(1));
-}
-
-
-TEST(RunSimpleDisjunctionVariable) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0x118f6ffc;
- Variable var = m.NewVariable(m.Int32Constant(constant));
- {
- IfBuilder cond(&m);
- cond.If(m.Int32Constant(0)).Or();
- var.Set(m.Word32Not(var.Get()));
- cond.If(m.Parameter(0)).Then();
- var.Set(m.Word32Not(m.Word32Not(var.Get())));
- cond.Else();
- var.Set(m.Word32Not(var.Get()));
- }
- m.Return(var.Get());
-
- CHECK_EQ(constant, m.Call(0));
- CHECK_EQ(~constant, m.Call(1));
-}
-
-
-TEST(RunIfElse) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- {
- IfBuilder cond(&m);
- bool first = true;
- FOR_INT32_INPUTS(i) {
- Node* c = m.Int32Constant(*i);
- if (first) {
- cond.If(m.Word32Equal(m.Parameter(0), c)).Then();
- m.Return(c);
- first = false;
- } else {
- cond.Else();
- cond.If(m.Word32Equal(m.Parameter(0), c)).Then();
- m.Return(c);
- }
- }
- }
- m.Return(m.Int32Constant(333));
-
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, m.Call(*i)); }
-}
-
-
-enum IfBuilderBranchType { kSkipBranch, kBranchFallsThrough, kBranchReturns };
-
-
-static IfBuilderBranchType all_branch_types[] = {
- kSkipBranch, kBranchFallsThrough, kBranchReturns};
-
-
-static void RunIfBuilderDisjunction(size_t max, IfBuilderBranchType then_type,
- IfBuilderBranchType else_type) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- std::vector<int32_t> inputs = ValueHelper::int32_vector();
- std::vector<int32_t>::const_iterator i = inputs.begin();
- int32_t hit = 0x8c723c9a;
- int32_t miss = 0x88a6b9f3;
- {
- Node* p0 = m.Parameter(0);
- IfBuilder cond(&m);
- for (size_t j = 0; j < max; j++, ++i) {
- CHECK(i != inputs.end()); // Thank you STL.
- if (j > 0) cond.Or();
- cond.If(m.Word32Equal(p0, m.Int32Constant(*i)));
- }
- switch (then_type) {
- case kSkipBranch:
- break;
- case kBranchFallsThrough:
- cond.Then();
- break;
- case kBranchReturns:
- cond.Then();
- m.Return(m.Int32Constant(hit));
- break;
- }
- switch (else_type) {
- case kSkipBranch:
- break;
- case kBranchFallsThrough:
- cond.Else();
- break;
- case kBranchReturns:
- cond.Else();
- m.Return(m.Int32Constant(miss));
- break;
- }
- }
- if (then_type != kBranchReturns || else_type != kBranchReturns) {
- m.Return(m.Int32Constant(miss));
- }
-
- if (then_type != kBranchReturns) hit = miss;
-
- i = inputs.begin();
- for (size_t j = 0; i != inputs.end(); j++, ++i) {
- int32_t result = m.Call(*i);
- CHECK_EQ(j < max ? hit : miss, result);
- }
-}
-
-
-TEST(RunIfBuilderDisjunction) {
- size_t len = ValueHelper::int32_vector().size() - 1;
- size_t max = len > 10 ? 10 : len - 1;
- for (size_t i = 0; i < ARRAY_SIZE(all_branch_types); i++) {
- for (size_t j = 0; j < ARRAY_SIZE(all_branch_types); j++) {
- for (size_t size = 1; size < max; size++) {
- RunIfBuilderDisjunction(size, all_branch_types[i], all_branch_types[j]);
- }
- RunIfBuilderDisjunction(len, all_branch_types[i], all_branch_types[j]);
- }
- }
-}
-
-
-static void RunIfBuilderConjunction(size_t max, IfBuilderBranchType then_type,
- IfBuilderBranchType else_type) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- std::vector<int32_t> inputs = ValueHelper::int32_vector();
- std::vector<int32_t>::const_iterator i = inputs.begin();
- int32_t hit = 0xa0ceb9ca;
- int32_t miss = 0x226cafaa;
- {
- IfBuilder cond(&m);
- Node* p0 = m.Parameter(0);
- for (size_t j = 0; j < max; j++, ++i) {
- if (j > 0) cond.And();
- cond.If(m.Word32NotEqual(p0, m.Int32Constant(*i)));
- }
- switch (then_type) {
- case kSkipBranch:
- break;
- case kBranchFallsThrough:
- cond.Then();
- break;
- case kBranchReturns:
- cond.Then();
- m.Return(m.Int32Constant(hit));
- break;
- }
- switch (else_type) {
- case kSkipBranch:
- break;
- case kBranchFallsThrough:
- cond.Else();
- break;
- case kBranchReturns:
- cond.Else();
- m.Return(m.Int32Constant(miss));
- break;
- }
- }
- if (then_type != kBranchReturns || else_type != kBranchReturns) {
- m.Return(m.Int32Constant(miss));
- }
-
- if (then_type != kBranchReturns) hit = miss;
-
- i = inputs.begin();
- for (size_t j = 0; i != inputs.end(); j++, ++i) {
- int32_t result = m.Call(*i);
- CHECK_EQ(j >= max ? hit : miss, result);
- }
-}
-
-
-TEST(RunIfBuilderConjunction) {
- size_t len = ValueHelper::int32_vector().size() - 1;
- size_t max = len > 10 ? 10 : len - 1;
- for (size_t i = 0; i < ARRAY_SIZE(all_branch_types); i++) {
- for (size_t j = 0; j < ARRAY_SIZE(all_branch_types); j++) {
- for (size_t size = 1; size < max; size++) {
- RunIfBuilderConjunction(size, all_branch_types[i], all_branch_types[j]);
- }
- RunIfBuilderConjunction(len, all_branch_types[i], all_branch_types[j]);
- }
- }
-}
-
-
-static void RunDisjunctionVariables(int disjunctions, bool explicit_then,
- bool explicit_else) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0x65a09535;
-
- Node* cmp_val = m.Int32Constant(constant);
- Node* one = m.Int32Constant(1);
- Variable var = m.NewVariable(m.Parameter(0));
- {
- IfBuilder cond(&m);
- cond.If(m.Word32Equal(var.Get(), cmp_val));
- for (int i = 0; i < disjunctions; i++) {
- cond.Or();
- var.Set(m.Int32Add(var.Get(), one));
- cond.If(m.Word32Equal(var.Get(), cmp_val));
- }
- if (explicit_then) {
- cond.Then();
- }
- if (explicit_else) {
- cond.Else();
- var.Set(m.Int32Add(var.Get(), one));
- }
- }
- m.Return(var.Get());
-
- int adds = disjunctions + (explicit_else ? 1 : 0);
- int32_t input = constant - 2 * adds;
- for (int i = 0; i < adds; i++) {
- CHECK_EQ(input + adds, m.Call(input));
- input++;
- }
- for (int i = 0; i < adds + 1; i++) {
- CHECK_EQ(constant, m.Call(input));
- input++;
- }
- for (int i = 0; i < adds; i++) {
- CHECK_EQ(input + adds, m.Call(input));
- input++;
- }
-}
-
-
-TEST(RunDisjunctionVariables) {
- for (int disjunctions = 0; disjunctions < 10; disjunctions++) {
- RunDisjunctionVariables(disjunctions, false, false);
- RunDisjunctionVariables(disjunctions, false, true);
- RunDisjunctionVariables(disjunctions, true, false);
- RunDisjunctionVariables(disjunctions, true, true);
- }
-}
-
-
-static void RunConjunctionVariables(int conjunctions, bool explicit_then,
- bool explicit_else) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- int32_t constant = 0x2c7f4b45;
- Node* cmp_val = m.Int32Constant(constant);
- Node* one = m.Int32Constant(1);
- Variable var = m.NewVariable(m.Parameter(0));
- {
- IfBuilder cond(&m);
- cond.If(m.Word32NotEqual(var.Get(), cmp_val));
- for (int i = 0; i < conjunctions; i++) {
- cond.And();
- var.Set(m.Int32Add(var.Get(), one));
- cond.If(m.Word32NotEqual(var.Get(), cmp_val));
- }
- if (explicit_then) {
- cond.Then();
- var.Set(m.Int32Add(var.Get(), one));
- }
- if (explicit_else) {
- cond.Else();
- }
- }
- m.Return(var.Get());
-
- int adds = conjunctions + (explicit_then ? 1 : 0);
- int32_t input = constant - 2 * adds;
- for (int i = 0; i < adds; i++) {
- CHECK_EQ(input + adds, m.Call(input));
- input++;
- }
- for (int i = 0; i < adds + 1; i++) {
- CHECK_EQ(constant, m.Call(input));
- input++;
- }
- for (int i = 0; i < adds; i++) {
- CHECK_EQ(input + adds, m.Call(input));
- input++;
- }
-}
-
-
-TEST(RunConjunctionVariables) {
- for (int conjunctions = 0; conjunctions < 10; conjunctions++) {
- RunConjunctionVariables(conjunctions, false, false);
- RunConjunctionVariables(conjunctions, false, true);
- RunConjunctionVariables(conjunctions, true, false);
- RunConjunctionVariables(conjunctions, true, true);
- }
-}
-
-
-TEST(RunSimpleNestedIf) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
- const size_t NUM_VALUES = 7;
- std::vector<int32_t> inputs = ValueHelper::int32_vector();
- CHECK(inputs.size() >= NUM_VALUES);
- Node* values[NUM_VALUES];
- for (size_t j = 0; j < NUM_VALUES; j++) {
- values[j] = m.Int32Constant(inputs[j]);
- }
- {
- IfBuilder if_0(&m);
- if_0.If(m.Word32Equal(m.Parameter(0), values[0])).Then();
- {
- IfBuilder if_1(&m);
- if_1.If(m.Word32Equal(m.Parameter(1), values[1])).Then();
- { m.Return(values[3]); }
- if_1.Else();
- { m.Return(values[4]); }
- }
- if_0.Else();
- {
- IfBuilder if_1(&m);
- if_1.If(m.Word32Equal(m.Parameter(1), values[2])).Then();
- { m.Return(values[5]); }
- if_1.Else();
- { m.Return(values[6]); }
- }
- }
-
- int32_t result = m.Call(inputs[0], inputs[1]);
- CHECK_EQ(inputs[3], result);
-
- result = m.Call(inputs[0], inputs[1] + 1);
- CHECK_EQ(inputs[4], result);
-
- result = m.Call(inputs[0] + 1, inputs[2]);
- CHECK_EQ(inputs[5], result);
-
- result = m.Call(inputs[0] + 1, inputs[2] + 1);
- CHECK_EQ(inputs[6], result);
-}
-
-
-TEST(RunUnreachableBlockAfterIf) {
- StructuredMachineAssemblerTester<int32_t> m;
- {
- IfBuilder cond(&m);
- cond.If(m.Int32Constant(0)).Then();
- m.Return(m.Int32Constant(1));
- cond.Else();
- m.Return(m.Int32Constant(2));
- }
- // This is unreachable.
- m.Return(m.Int32Constant(3));
- CHECK_EQ(2, m.Call());
-}
-
-
-TEST(RunUnreachableBlockAfterLoop) {
- StructuredMachineAssemblerTester<int32_t> m;
- {
- Loop loop(&m);
- m.Return(m.Int32Constant(1));
- }
- // This is unreachable.
- m.Return(m.Int32Constant(3));
- CHECK_EQ(1, m.Call());
-}
-
-
-TEST(RunSimpleLoop) {
- StructuredMachineAssemblerTester<int32_t> m;
- int32_t constant = 0x120c1f85;
- {
- Loop loop(&m);
- m.Return(m.Int32Constant(constant));
- }
- CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunSimpleLoopBreak) {
- StructuredMachineAssemblerTester<int32_t> m;
- int32_t constant = 0x10ddb0a6;
- {
- Loop loop(&m);
- loop.Break();
- }
- m.Return(m.Int32Constant(constant));
- CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunCountToTen) {
- StructuredMachineAssemblerTester<int32_t> m;
- Variable i = m.NewVariable(m.Int32Constant(0));
- Node* ten = m.Int32Constant(10);
- Node* one = m.Int32Constant(1);
- {
- Loop loop(&m);
- {
- IfBuilder cond(&m);
- cond.If(m.Word32Equal(i.Get(), ten)).Then();
- loop.Break();
- }
- i.Set(m.Int32Add(i.Get(), one));
- }
- m.Return(i.Get());
- CHECK_EQ(10, m.Call());
-}
-
-
-TEST(RunCountToTenAcc) {
- StructuredMachineAssemblerTester<int32_t> m;
- int32_t constant = 0xf27aed64;
- Variable i = m.NewVariable(m.Int32Constant(0));
- Variable var = m.NewVariable(m.Int32Constant(constant));
- Node* ten = m.Int32Constant(10);
- Node* one = m.Int32Constant(1);
- {
- Loop loop(&m);
- {
- IfBuilder cond(&m);
- cond.If(m.Word32Equal(i.Get(), ten)).Then();
- loop.Break();
- }
- i.Set(m.Int32Add(i.Get(), one));
- var.Set(m.Int32Add(var.Get(), i.Get()));
- }
- m.Return(var.Get());
-
- CHECK_EQ(constant + 10 + 9 * 5, m.Call());
-}
-
-
-TEST(RunSimpleNestedLoop) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- Node* zero = m.Int32Constant(0);
- Node* one = m.Int32Constant(1);
- Node* two = m.Int32Constant(2);
- Node* three = m.Int32Constant(3);
- {
- Loop l1(&m);
- {
- Loop l2(&m);
- {
- IfBuilder cond(&m);
- cond.If(m.Word32Equal(m.Parameter(0), one)).Then();
- l1.Break();
- }
- {
- Loop l3(&m);
- {
- IfBuilder cond(&m);
- cond.If(m.Word32Equal(m.Parameter(0), two)).Then();
- l2.Break();
- cond.Else();
- cond.If(m.Word32Equal(m.Parameter(0), three)).Then();
- l3.Break();
- }
- m.Return(three);
- }
- m.Return(two);
- }
- m.Return(one);
- }
- m.Return(zero);
-
- CHECK_EQ(0, m.Call(1));
- CHECK_EQ(1, m.Call(2));
- CHECK_EQ(2, m.Call(3));
- CHECK_EQ(3, m.Call(4));
-}
-
-
-TEST(RunFib) {
- StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
-
- // Constants.
- Node* zero = m.Int32Constant(0);
- Node* one = m.Int32Constant(1);
- Node* two = m.Int32Constant(2);
- // Variables.
- // cnt = input
- Variable cnt = m.NewVariable(m.Parameter(0));
- // if (cnt < 2) return i
- {
- IfBuilder lt2(&m);
- lt2.If(m.Int32LessThan(cnt.Get(), two)).Then();
- m.Return(cnt.Get());
- }
- // cnt -= 2
- cnt.Set(m.Int32Sub(cnt.Get(), two));
- // res = 1
- Variable res = m.NewVariable(one);
- {
- // prv_0 = 1
- // prv_1 = 1
- Variable prv_0 = m.NewVariable(one);
- Variable prv_1 = m.NewVariable(one);
- // while (cnt != 0) {
- Loop main(&m);
- {
- IfBuilder nz(&m);
- nz.If(m.Word32Equal(cnt.Get(), zero)).Then();
- main.Break();
- }
- // res = prv_0 + prv_1
- // prv_0 = prv_1
- // prv_1 = res
- res.Set(m.Int32Add(prv_0.Get(), prv_1.Get()));
- prv_0.Set(prv_1.Get());
- prv_1.Set(res.Get());
- // cnt--
- cnt.Set(m.Int32Sub(cnt.Get(), one));
- }
- m.Return(res.Get());
-
- int32_t values[] = {0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144};
- for (size_t i = 0; i < ARRAY_SIZE(values); i++) {
- CHECK_EQ(values[i], m.Call(static_cast<int32_t>(i)));
- }
-}
-
-
-static int VariableIntroduction() {
- while (true) {
- int ret = 0;
- for (int i = 0; i < 10; i++) {
- for (int j = i; j < 10; j++) {
- for (int k = j; k < 10; k++) {
- ret++;
- }
- ret++;
- }
- ret++;
- }
- return ret;
- }
-}
-
-
-TEST(RunVariableIntroduction) {
- StructuredMachineAssemblerTester<int32_t> m;
- Node* zero = m.Int32Constant(0);
- Node* one = m.Int32Constant(1);
- // Use an IfBuilder to get out of start block.
- {
- IfBuilder i0(&m);
- i0.If(zero).Then();
- m.Return(one);
- }
- Node* ten = m.Int32Constant(10);
- Variable v0 =
- m.NewVariable(zero); // Introduce variable outside of start block.
- {
- Loop l0(&m);
- Variable ret = m.NewVariable(zero); // Introduce loop variable.
- {
- Loop l1(&m);
- {
- IfBuilder i1(&m);
- i1.If(m.Word32Equal(v0.Get(), ten)).Then();
- l1.Break();
- }
- Variable v1 = m.NewVariable(v0.Get()); // Introduce loop variable.
- {
- Loop l2(&m);
- {
- IfBuilder i2(&m);
- i2.If(m.Word32Equal(v1.Get(), ten)).Then();
- l2.Break();
- }
- Variable v2 = m.NewVariable(v1.Get()); // Introduce loop variable.
- {
- Loop l3(&m);
- {
- IfBuilder i3(&m);
- i3.If(m.Word32Equal(v2.Get(), ten)).Then();
- l3.Break();
- }
- ret.Set(m.Int32Add(ret.Get(), one));
- v2.Set(m.Int32Add(v2.Get(), one));
- }
- ret.Set(m.Int32Add(ret.Get(), one));
- v1.Set(m.Int32Add(v1.Get(), one));
- }
- ret.Set(m.Int32Add(ret.Get(), one));
- v0.Set(m.Int32Add(v0.Get(), one));
- }
- m.Return(ret.Get()); // Return loop variable.
- }
- CHECK_EQ(VariableIntroduction(), m.Call());
-}
-
-
-TEST(RunIfBuilderVariableLiveness) {
- StructuredMachineAssemblerTester<int32_t> m;
- typedef i::compiler::StructuredMachineAssemblerFriend F;
- Node* zero = m.Int32Constant(0);
- Variable v_outer = m.NewVariable(zero);
- IfBuilder cond(&m);
- cond.If(zero).Then();
- Variable v_then = m.NewVariable(zero);
- CHECK(F::VariableAlive(&m, v_outer));
- CHECK(F::VariableAlive(&m, v_then));
- cond.Else();
- Variable v_else = m.NewVariable(zero);
- CHECK(F::VariableAlive(&m, v_outer));
- CHECK(F::VariableAlive(&m, v_else));
- CHECK(!F::VariableAlive(&m, v_then));
- cond.End();
- CHECK(F::VariableAlive(&m, v_outer));
- CHECK(!F::VariableAlive(&m, v_then));
- CHECK(!F::VariableAlive(&m, v_else));
-}
-
-
-TEST(RunSimpleExpression1) {
- StructuredMachineAssemblerTester<int32_t> m;
-
- int32_t constant = 0x0c2974ef;
- Node* zero = m.Int32Constant(0);
- Node* one = m.Int32Constant(1);
- {
- // if (((1 && 1) && 1) && 1) return constant; return 0;
- IfBuilder cond(&m);
- cond.OpenParen();
- cond.OpenParen().If(one).And();
- cond.If(one).CloseParen().And();
- cond.If(one).CloseParen().And();
- cond.If(one).Then();
- m.Return(m.Int32Constant(constant));
- }
- m.Return(zero);
-
- CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunSimpleExpression2) {
- StructuredMachineAssemblerTester<int32_t> m;
-
- int32_t constant = 0x2eddc11b;
- Node* zero = m.Int32Constant(0);
- Node* one = m.Int32Constant(1);
- {
- // if (((0 || 1) && 1) && 1) return constant; return 0;
- IfBuilder cond(&m);
- cond.OpenParen();
- cond.OpenParen().If(zero).Or();
- cond.If(one).CloseParen().And();
- cond.If(one).CloseParen().And();
- cond.If(one).Then();
- m.Return(m.Int32Constant(constant));
- }
- m.Return(zero);
-
- CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunSimpleExpression3) {
- StructuredMachineAssemblerTester<int32_t> m;
-
- int32_t constant = 0x9ed5e9ef;
- Node* zero = m.Int32Constant(0);
- Node* one = m.Int32Constant(1);
- {
- // if (1 && ((0 || 1) && 1) && 1) return constant; return 0;
- IfBuilder cond(&m);
- cond.If(one).And();
- cond.OpenParen();
- cond.OpenParen().If(zero).Or();
- cond.If(one).CloseParen().And();
- cond.If(one).CloseParen().And();
- cond.If(one).Then();
- m.Return(m.Int32Constant(constant));
- }
- m.Return(zero);
-
- CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunSimpleExpressionVariable1) {
- StructuredMachineAssemblerTester<int32_t> m;
-
- int32_t constant = 0x4b40a986;
- Node* one = m.Int32Constant(1);
- Variable var = m.NewVariable(m.Int32Constant(constant));
- {
- // if (var.Get() && ((!var || var) && var) && var) {} return var;
- // incrementing var in each environment.
- IfBuilder cond(&m);
- cond.If(var.Get()).And();
- var.Set(m.Int32Add(var.Get(), one));
- cond.OpenParen().OpenParen().If(m.Word32BinaryNot(var.Get())).Or();
- var.Set(m.Int32Add(var.Get(), one));
- cond.If(var.Get()).CloseParen().And();
- var.Set(m.Int32Add(var.Get(), one));
- cond.If(var.Get()).CloseParen().And();
- var.Set(m.Int32Add(var.Get(), one));
- cond.If(var.Get());
- }
- m.Return(var.Get());
-
- CHECK_EQ(constant + 4, m.Call());
-}
-
-
-class QuicksortHelper : public StructuredMachineAssemblerTester<int32_t> {
- public:
- QuicksortHelper()
- : StructuredMachineAssemblerTester<int32_t>(
- MachineOperatorBuilder::pointer_rep(), kMachineWord32,
- MachineOperatorBuilder::pointer_rep(), kMachineWord32),
- input_(NULL),
- stack_limit_(NULL),
- one_(Int32Constant(1)),
- stack_frame_size_(Int32Constant(kFrameVariables * 4)),
- left_offset_(Int32Constant(0 * 4)),
- right_offset_(Int32Constant(1 * 4)) {
- Build();
- }
-
- int32_t DoCall(int32_t* input, int32_t input_length) {
- int32_t stack_space[20];
- // Do call.
- int32_t return_val = Call(input, input_length, stack_space,
- static_cast<int32_t>(ARRAY_SIZE(stack_space)));
- // Ran out of stack space.
- if (return_val != 0) return return_val;
- // Check sorted.
- int32_t last = input[0];
- for (int32_t i = 0; i < input_length; i++) {
- CHECK(last <= input[i]);
- last = input[i];
- }
- return return_val;
- }
-
- private:
- void Inc32(const Variable& var) { var.Set(Int32Add(var.Get(), one_)); }
- Node* Index(Node* index) { return Word32Shl(index, Int32Constant(2)); }
- Node* ArrayLoad(Node* index) {
- return Load(kMachineWord32, input_, Index(index));
- }
- void Swap(Node* a_index, Node* b_index) {
- Node* a = ArrayLoad(a_index);
- Node* b = ArrayLoad(b_index);
- Store(kMachineWord32, input_, Index(a_index), b);
- Store(kMachineWord32, input_, Index(b_index), a);
- }
- void AddToCallStack(const Variable& fp, Node* left, Node* right) {
- {
- // Stack limit check.
- IfBuilder cond(this);
- cond.If(IntPtrLessThanOrEqual(fp.Get(), stack_limit_)).Then();
- Return(Int32Constant(-1));
- }
- Store(kMachineWord32, fp.Get(), left_offset_, left);
- Store(kMachineWord32, fp.Get(), right_offset_, right);
- fp.Set(IntPtrAdd(fp.Get(), ConvertInt32ToIntPtr(stack_frame_size_)));
- }
- void Build() {
- Variable left = NewVariable(Int32Constant(0));
- Variable right =
- NewVariable(Int32Sub(Parameter(kInputLengthParameter), one_));
- input_ = Parameter(kInputParameter);
- Node* top_of_stack = Parameter(kStackParameter);
- stack_limit_ = IntPtrSub(
- top_of_stack, ConvertInt32ToIntPtr(Parameter(kStackLengthParameter)));
- Variable fp = NewVariable(top_of_stack);
- {
- Loop outermost(this);
- // Edge case - 2 element array.
- {
- IfBuilder cond(this);
- cond.If(Word32Equal(left.Get(), Int32Sub(right.Get(), one_))).And();
- cond.If(Int32LessThanOrEqual(ArrayLoad(right.Get()),
- ArrayLoad(left.Get()))).Then();
- Swap(left.Get(), right.Get());
- }
- {
- IfBuilder cond(this);
- // Algorithm complete condition.
- cond.If(WordEqual(top_of_stack, fp.Get())).And();
- cond.If(Int32LessThanOrEqual(Int32Sub(right.Get(), one_), left.Get()))
- .Then();
- outermost.Break();
- // 'Recursion' exit condition. Pop frame and continue.
- cond.Else();
- cond.If(Int32LessThanOrEqual(Int32Sub(right.Get(), one_), left.Get()))
- .Then();
- fp.Set(IntPtrSub(fp.Get(), ConvertInt32ToIntPtr(stack_frame_size_)));
- left.Set(Load(kMachineWord32, fp.Get(), left_offset_));
- right.Set(Load(kMachineWord32, fp.Get(), right_offset_));
- outermost.Continue();
- }
- // Partition.
- Variable store_index = NewVariable(left.Get());
- {
- Node* pivot_index =
- Int32Div(Int32Add(left.Get(), right.Get()), Int32Constant(2));
- Node* pivot = ArrayLoad(pivot_index);
- Swap(pivot_index, right.Get());
- Variable i = NewVariable(left.Get());
- {
- Loop partition(this);
- {
- IfBuilder cond(this);
- // Parition complete.
- cond.If(Word32Equal(i.Get(), right.Get())).Then();
- partition.Break();
- // Need swap.
- cond.Else();
- cond.If(Int32LessThanOrEqual(ArrayLoad(i.Get()), pivot)).Then();
- Swap(i.Get(), store_index.Get());
- Inc32(store_index);
- }
- Inc32(i);
- } // End partition loop.
- Swap(store_index.Get(), right.Get());
- }
- // 'Recurse' left and right halves of partition.
- // Tail recurse second one.
- AddToCallStack(fp, left.Get(), Int32Sub(store_index.Get(), one_));
- left.Set(Int32Add(store_index.Get(), one_));
- } // End outermost loop.
- Return(Int32Constant(0));
- }
-
- static const int kFrameVariables = 2; // left, right
- // Parameter offsets.
- static const int kInputParameter = 0;
- static const int kInputLengthParameter = 1;
- static const int kStackParameter = 2;
- static const int kStackLengthParameter = 3;
- // Function inputs.
- Node* input_;
- Node* stack_limit_;
- // Constants.
- Node* const one_;
- // Frame constants.
- Node* const stack_frame_size_;
- Node* const left_offset_;
- Node* const right_offset_;
-};
-
-
-TEST(RunSimpleQuicksort) {
- QuicksortHelper m;
- int32_t inputs[] = {9, 7, 1, 8, 11};
- CHECK_EQ(0, m.DoCall(inputs, ARRAY_SIZE(inputs)));
-}
-
-
-TEST(RunRandomQuicksort) {
- QuicksortHelper m;
-
- v8::base::RandomNumberGenerator rng;
- static const int kMaxLength = 40;
- int32_t inputs[kMaxLength];
-
- for (int length = 1; length < kMaxLength; length++) {
- for (int i = 0; i < 70; i++) {
- // Randomize inputs.
- for (int j = 0; j < length; j++) {
- inputs[j] = rng.NextInt(10) - 5;
- }
- CHECK_EQ(0, m.DoCall(inputs, length));
- }
- }
-}
-
-
-TEST(MultipleScopes) {
- StructuredMachineAssemblerTester<int32_t> m;
- for (int i = 0; i < 10; i++) {
- IfBuilder b(&m);
- b.If(m.Int32Constant(0)).Then();
- m.NewVariable(m.Int32Constant(0));
- }
- m.Return(m.Int32Constant(0));
- CHECK_EQ(0, m.Call());
-}
-
-#endif
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 5bfd7884d0..7d7c11e4f1 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -27,34 +27,29 @@ class ValueHelper {
ValueHelper() : isolate_(CcTest::InitIsolateOnce()) {}
- template <typename T>
- void CheckConstant(T expected, Node* node) {
- CHECK_EQ(expected, ValueOf<T>(node->op()));
- }
-
void CheckFloat64Constant(double expected, Node* node) {
CHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
- CHECK_EQ(expected, ValueOf<double>(node->op()));
+ CHECK_EQ(expected, OpParameter<double>(node));
}
void CheckNumberConstant(double expected, Node* node) {
CHECK_EQ(IrOpcode::kNumberConstant, node->opcode());
- CHECK_EQ(expected, ValueOf<double>(node->op()));
+ CHECK_EQ(expected, OpParameter<double>(node));
}
void CheckInt32Constant(int32_t expected, Node* node) {
CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
- CHECK_EQ(expected, ValueOf<int32_t>(node->op()));
+ CHECK_EQ(expected, OpParameter<int32_t>(node));
}
void CheckUint32Constant(int32_t expected, Node* node) {
CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
- CHECK_EQ(expected, ValueOf<uint32_t>(node->op()));
+ CHECK_EQ(expected, OpParameter<uint32_t>(node));
}
void CheckHeapConstant(Object* expected, Node* node) {
CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
- CHECK_EQ(expected, *ValueOf<Handle<Object> >(node->op()));
+ CHECK_EQ(expected, *OpParameter<Unique<Object> >(node).handle());
}
void CheckTrue(Node* node) {
@@ -65,6 +60,45 @@ class ValueHelper {
CheckHeapConstant(isolate_->heap()->false_value(), node);
}
+ static std::vector<float> float32_vector() {
+ static const float kValues[] = {
+ -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+ -1.22813e+35f, -1.20555e+35f, -1.34584e+34f,
+ -1.0079e+32f, -6.49364e+26f, -3.06077e+25f,
+ -1.46821e+25f, -1.17658e+23f, -1.9617e+22f,
+ -2.7357e+20f, -1.48708e+13f, -1.89633e+12f,
+ -4.66622e+11f, -2.22581e+11f, -1.45381e+10f,
+ -1.3956e+09f, -1.32951e+09f, -1.30721e+09f,
+ -1.19756e+09f, -9.26822e+08f, -6.35647e+08f,
+ -4.00037e+08f, -1.81227e+08f, -5.09256e+07f,
+ -964300.0f, -192446.0f, -28455.0f,
+ -27194.0f, -26401.0f, -20575.0f,
+ -17069.0f, -9167.0f, -960.178f,
+ -113.0f, -62.0f, -15.0f,
+ -7.0f, -0.0256635f, -4.60374e-07f,
+ -3.63759e-10f, -4.30175e-14f, -5.27385e-15f,
+ -1.48084e-15f, -1.05755e-19f, -3.2995e-21f,
+ -1.67354e-23f, -1.11885e-23f, -1.78506e-30f,
+ -5.07594e-31f, -3.65799e-31f, -1.43718e-34f,
+ -1.27126e-38f, -0.0f, 0.0f,
+ 1.17549e-38f, 1.56657e-37f, 4.08512e-29f,
+ 3.31357e-28f, 6.25073e-22f, 4.1723e-13f,
+ 1.44343e-09f, 5.27004e-08f, 9.48298e-08f,
+ 5.57888e-07f, 4.89988e-05f, 0.244326f,
+ 12.4895f, 19.0f, 47.0f,
+ 106.0f, 538.324f, 564.536f,
+ 819.124f, 7048.0f, 12611.0f,
+ 19878.0f, 20309.0f, 797056.0f,
+ 1.77219e+09f, 1.51116e+11f, 4.18193e+13f,
+ 3.59167e+16f, 3.38211e+19f, 2.67488e+20f,
+ 1.78831e+21f, 9.20914e+21f, 8.35654e+23f,
+ 1.4495e+24f, 5.94015e+25f, 4.43608e+30f,
+ 2.44502e+33f, 2.61152e+33f, 1.38178e+37f,
+ 1.71306e+37f, 3.31899e+38f, 3.40282e+38f,
+ std::numeric_limits<float>::infinity()};
+ return std::vector<float>(&kValues[0], &kValues[arraysize(kValues)]);
+ }
+
static std::vector<double> float64_vector() {
static const double nan = v8::base::OS::nan_value();
static const double values[] = {
@@ -76,7 +110,7 @@ class ValueHelper {
-V8_INFINITY, nan, 2147483647.375, 2147483647.75,
2147483648.0, 2147483648.25, 2147483649.25, -2147483647.0,
-2147483647.125, -2147483647.875, -2147483648.25, -2147483649.5};
- return std::vector<double>(&values[0], &values[ARRAY_SIZE(values)]);
+ return std::vector<double>(&values[0], &values[arraysize(values)]);
}
static const std::vector<int32_t> int32_vector() {
@@ -94,7 +128,7 @@ class ValueHelper {
0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
- return std::vector<uint32_t>(&kValues[0], &kValues[ARRAY_SIZE(kValues)]);
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<double> nan_vector(size_t limit = 0) {
@@ -102,14 +136,14 @@ class ValueHelper {
static const double values[] = {-nan, -V8_INFINITY * -0.0,
-V8_INFINITY * 0.0, V8_INFINITY * -0.0,
V8_INFINITY * 0.0, nan};
- return std::vector<double>(&values[0], &values[ARRAY_SIZE(values)]);
+ return std::vector<double>(&values[0], &values[arraysize(values)]);
}
static const std::vector<uint32_t> ror_vector() {
static const uint32_t kValues[31] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
- return std::vector<uint32_t>(&kValues[0], &kValues[ARRAY_SIZE(kValues)]);
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
};
@@ -122,8 +156,13 @@ class ValueHelper {
#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_FLOAT32_INPUTS(var) FOR_INPUTS(float, float32, var)
#define FOR_FLOAT64_INPUTS(var) FOR_INPUTS(double, float64, var)
+#define FOR_INT32_SHIFTS(var) for (int32_t var = 0; var < 32; var++)
+
+#define FOR_UINT32_SHIFTS(var) for (uint32_t var = 0; var < 32; var++)
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 314c3c1479..d647a3128e 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -106,7 +106,7 @@ TEST(StressHandles) {
void TestGetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -115,7 +115,7 @@ void TestGetter(
void TestSetter(
- v8::Local<v8::String> name,
+ v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
@@ -124,7 +124,7 @@ void TestSetter(
Handle<AccessorInfo> TestAccessorInfo(
Isolate* isolate, PropertyAttributes attributes) {
- Handle<String> name = isolate->factory()->NewStringFromStaticAscii("get");
+ Handle<String> name = isolate->factory()->NewStringFromStaticChars("get");
return Accessors::MakeAccessor(isolate, name, &TestGetter, &TestSetter,
attributes);
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 9ddc9db71d..0330ac892f 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -63,6 +63,7 @@ using ::v8::FunctionTemplate;
using ::v8::Handle;
using ::v8::HandleScope;
using ::v8::Local;
+using ::v8::Name;
using ::v8::Message;
using ::v8::MessageCallback;
using ::v8::Object;
@@ -71,6 +72,7 @@ using ::v8::Persistent;
using ::v8::Script;
using ::v8::StackTrace;
using ::v8::String;
+using ::v8::Symbol;
using ::v8::TryCatch;
using ::v8::Undefined;
using ::v8::UniqueId;
@@ -128,19 +130,18 @@ static void SignatureCallback(
// Tests that call v8::V8::Dispose() cannot be threaded.
-TEST(InitializeAndDisposeOnce) {
+UNINITIALIZED_TEST(InitializeAndDisposeOnce) {
CHECK(v8::V8::Initialize());
CHECK(v8::V8::Dispose());
}
// Tests that call v8::V8::Dispose() cannot be threaded.
-TEST(InitializeAndDisposeMultiple) {
+UNINITIALIZED_TEST(InitializeAndDisposeMultiple) {
for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
- // TODO(mstarzinger): This should fail gracefully instead of asserting.
- // for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
+ for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
}
@@ -261,7 +262,7 @@ THREADED_TEST(ReceiverSignature) {
const char* test_objects[] = {
"fun_instance", "sub_fun_instance", "obj", "unrel" };
unsigned bad_signature_start_offset = 2;
- for (unsigned i = 0; i < ARRAY_SIZE(test_objects); i++) {
+ for (unsigned i = 0; i < arraysize(test_objects); i++) {
i::ScopedVector<char> source(200);
i::SNPrintF(
source, "var test_object = %s; test_object", test_objects[i]);
@@ -436,16 +437,16 @@ class TestResource: public String::ExternalStringResource {
};
-class TestAsciiResource: public String::ExternalAsciiStringResource {
+class TestOneByteResource : public String::ExternalOneByteStringResource {
public:
- explicit TestAsciiResource(const char* data, int* counter = NULL,
- size_t offset = 0)
+ explicit TestOneByteResource(const char* data, int* counter = NULL,
+ size_t offset = 0)
: orig_data_(data),
data_(data + offset),
length_(strlen(data) - offset),
counter_(counter) {}
- ~TestAsciiResource() {
+ ~TestOneByteResource() {
i::DeleteArray(orig_data_);
if (counter_ != NULL) ++*counter_;
}
@@ -495,22 +496,22 @@ THREADED_TEST(ScriptUsingStringResource) {
}
-THREADED_TEST(ScriptUsingAsciiStringResource) {
+THREADED_TEST(ScriptUsingOneByteStringResource) {
int dispose_count = 0;
const char* c_source = "1 + 2 * 3";
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- TestAsciiResource* resource = new TestAsciiResource(i::StrDup(c_source),
- &dispose_count);
+ TestOneByteResource* resource =
+ new TestOneByteResource(i::StrDup(c_source), &dispose_count);
Local<String> source = String::NewExternal(env->GetIsolate(), resource);
- CHECK(source->IsExternalAscii());
+ CHECK(source->IsExternalOneByte());
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
- source->GetExternalAsciiStringResource());
+ source->GetExternalOneByteStringResource());
String::Encoding encoding = String::UNKNOWN_ENCODING;
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalStringResourceBase(&encoding));
- CHECK_EQ(String::ASCII_ENCODING, encoding);
+ CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
@@ -536,10 +537,10 @@ THREADED_TEST(ScriptMakingExternalString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK_EQ(source->IsExternal(), false);
- CHECK_EQ(source->IsExternalAscii(), false);
+ CHECK_EQ(source->IsExternalOneByte(), false);
String::Encoding encoding = String::UNKNOWN_ENCODING;
CHECK_EQ(NULL, source->GetExternalStringResourceBase(&encoding));
- CHECK_EQ(String::ASCII_ENCODING, encoding);
+ CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
bool success = source->MakeExternal(new TestResource(two_byte_source,
&dispose_count));
CHECK(success);
@@ -556,7 +557,7 @@ THREADED_TEST(ScriptMakingExternalString) {
}
-THREADED_TEST(ScriptMakingExternalAsciiString) {
+THREADED_TEST(ScriptMakingExternalOneByteString) {
int dispose_count = 0;
const char* c_source = "1 + 2 * 3";
{
@@ -567,7 +568,7 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(
- new TestAsciiResource(i::StrDup(c_source), &dispose_count));
+ new TestOneByteResource(i::StrDup(c_source), &dispose_count));
CHECK(success);
Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
@@ -630,7 +631,7 @@ TEST(MakingExternalStringConditions) {
}
-TEST(MakingExternalAsciiStringConditions) {
+TEST(MakingExternalOneByteStringConditions) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -667,7 +668,7 @@ TEST(MakingExternalAsciiStringConditions) {
}
-TEST(MakingExternalUnalignedAsciiString) {
+TEST(MakingExternalUnalignedOneByteString) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -687,12 +688,12 @@ TEST(MakingExternalUnalignedAsciiString) {
// Turn into external string with unaligned resource data.
const char* c_cons = "_abcdefghijklmnopqrstuvwxyz";
- bool success = cons->MakeExternal(
- new TestAsciiResource(i::StrDup(c_cons), NULL, 1));
+ bool success =
+ cons->MakeExternal(new TestOneByteResource(i::StrDup(c_cons), NULL, 1));
CHECK(success);
const char* c_slice = "_bcdefghijklmnopqrstuvwxyz";
- success = slice->MakeExternal(
- new TestAsciiResource(i::StrDup(c_slice), NULL, 1));
+ success =
+ slice->MakeExternal(new TestOneByteResource(i::StrDup(c_slice), NULL, 1));
CHECK(success);
// Trigger GCs and force evacuation.
@@ -721,13 +722,13 @@ THREADED_TEST(UsingExternalString) {
}
-THREADED_TEST(UsingExternalAsciiString) {
+THREADED_TEST(UsingExternalOneByteString) {
i::Factory* factory = CcTest::i_isolate()->factory();
{
v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
- CcTest::isolate(), new TestAsciiResource(i::StrDup(one_byte_string)));
+ CcTest::isolate(), new TestOneByteResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -741,6 +742,53 @@ THREADED_TEST(UsingExternalAsciiString) {
}
+class DummyResource : public v8::String::ExternalStringResource {
+ public:
+ virtual const uint16_t* data() const { return string_; }
+ virtual size_t length() const { return 1 << 30; }
+
+ private:
+ uint16_t string_[10];
+};
+
+
+class DummyOneByteResource : public v8::String::ExternalOneByteStringResource {
+ public:
+ virtual const char* data() const { return string_; }
+ virtual size_t length() const { return 1 << 30; }
+
+ private:
+ char string_[10];
+};
+
+
+THREADED_TEST(NewExternalForVeryLongString) {
+ {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::TryCatch try_catch;
+ DummyOneByteResource r;
+ v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), &r);
+ CHECK(str.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ String::Utf8Value exception_value(try_catch.Exception());
+ CHECK_EQ("RangeError: Invalid string length", *exception_value);
+ }
+
+ {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::TryCatch try_catch;
+ DummyResource r;
+ v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), &r);
+ CHECK(str.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ String::Utf8Value exception_value(try_catch.Exception());
+ CHECK_EQ("RangeError: Invalid string length", *exception_value);
+ }
+}
+
+
THREADED_TEST(ScavengeExternalString) {
i::FLAG_stress_compaction = false;
i::FLAG_gc_global = false;
@@ -763,7 +811,7 @@ THREADED_TEST(ScavengeExternalString) {
}
-THREADED_TEST(ScavengeExternalAsciiString) {
+THREADED_TEST(ScavengeExternalOneByteString) {
i::FLAG_stress_compaction = false;
i::FLAG_gc_global = false;
int dispose_count = 0;
@@ -773,7 +821,7 @@ THREADED_TEST(ScavengeExternalAsciiString) {
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
CcTest::isolate(),
- new TestAsciiResource(i::StrDup(one_byte_string), &dispose_count));
+ new TestOneByteResource(i::StrDup(one_byte_string), &dispose_count));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
in_new_space = CcTest::heap()->InNewSpace(*istring);
@@ -786,15 +834,14 @@ THREADED_TEST(ScavengeExternalAsciiString) {
}
-class TestAsciiResourceWithDisposeControl: public TestAsciiResource {
+class TestOneByteResourceWithDisposeControl : public TestOneByteResource {
public:
// Only used by non-threaded tests, so it can use static fields.
static int dispose_calls;
static int dispose_count;
- TestAsciiResourceWithDisposeControl(const char* data, bool dispose)
- : TestAsciiResource(data, &dispose_count),
- dispose_(dispose) { }
+ TestOneByteResourceWithDisposeControl(const char* data, bool dispose)
+ : TestOneByteResource(data, &dispose_count), dispose_(dispose) {}
void Dispose() {
++dispose_calls;
@@ -805,17 +852,17 @@ class TestAsciiResourceWithDisposeControl: public TestAsciiResource {
};
-int TestAsciiResourceWithDisposeControl::dispose_count = 0;
-int TestAsciiResourceWithDisposeControl::dispose_calls = 0;
+int TestOneByteResourceWithDisposeControl::dispose_count = 0;
+int TestOneByteResourceWithDisposeControl::dispose_calls = 0;
TEST(ExternalStringWithDisposeHandling) {
const char* c_source = "1 + 2 * 3";
// Use a stack allocated external string resource allocated object.
- TestAsciiResourceWithDisposeControl::dispose_count = 0;
- TestAsciiResourceWithDisposeControl::dispose_calls = 0;
- TestAsciiResourceWithDisposeControl res_stack(i::StrDup(c_source), false);
+ TestOneByteResourceWithDisposeControl::dispose_count = 0;
+ TestOneByteResourceWithDisposeControl::dispose_calls = 0;
+ TestOneByteResourceWithDisposeControl res_stack(i::StrDup(c_source), false);
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -825,18 +872,18 @@ TEST(ExternalStringWithDisposeHandling) {
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
+ CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
- CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
+ CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_calls);
+ CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
// Use a heap allocated external string resource allocated object.
- TestAsciiResourceWithDisposeControl::dispose_count = 0;
- TestAsciiResourceWithDisposeControl::dispose_calls = 0;
- TestAsciiResource* res_heap =
- new TestAsciiResourceWithDisposeControl(i::StrDup(c_source), true);
+ TestOneByteResourceWithDisposeControl::dispose_count = 0;
+ TestOneByteResourceWithDisposeControl::dispose_calls = 0;
+ TestOneByteResource* res_heap =
+ new TestOneByteResourceWithDisposeControl(i::StrDup(c_source), true);
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -846,12 +893,12 @@ TEST(ExternalStringWithDisposeHandling) {
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
+ CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
- CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
+ CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_calls);
+ CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_count);
}
@@ -875,7 +922,8 @@ THREADED_TEST(StringConcat) {
Local<String> source = String::Concat(left, right);
right = String::NewExternal(
- env->GetIsolate(), new TestAsciiResource(i::StrDup(one_byte_extern_1)));
+ env->GetIsolate(),
+ new TestOneByteResource(i::StrDup(one_byte_extern_1)));
source = String::Concat(source, right);
right = String::NewExternal(
env->GetIsolate(),
@@ -1187,7 +1235,7 @@ THREADED_PROFILED_TEST(FastReturnValues) {
0, 234, -723,
i::Smi::kMinValue, i::Smi::kMaxValue
};
- for (size_t i = 0; i < ARRAY_SIZE(int_values); i++) {
+ for (size_t i = 0; i < arraysize(int_values); i++) {
for (int modifier = -1; modifier <= 1; modifier++) {
int int_value = int_values[i] + modifier;
// check int32_t
@@ -1219,7 +1267,7 @@ THREADED_PROFILED_TEST(FastReturnValues) {
kUndefinedReturnValue,
kEmptyStringReturnValue
};
- for (size_t i = 0; i < ARRAY_SIZE(oddballs); i++) {
+ for (size_t i = 0; i < arraysize(oddballs); i++) {
fast_return_value_void = oddballs[i];
value = TestFastReturnValues<void>();
switch (fast_return_value_void) {
@@ -1541,6 +1589,83 @@ THREADED_TEST(IsNativeError) {
}
+THREADED_TEST(IsGeneratorFunctionOrObject) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ CompileRun("function *gen() { yield 1; }\nfunction func() {}");
+ v8::Handle<Value> gen = CompileRun("gen");
+ v8::Handle<Value> genObj = CompileRun("gen()");
+ v8::Handle<Value> object = CompileRun("{a:42}");
+ v8::Handle<Value> func = CompileRun("func");
+
+ CHECK(gen->IsGeneratorFunction());
+ CHECK(gen->IsFunction());
+ CHECK(!gen->IsGeneratorObject());
+
+ CHECK(!genObj->IsGeneratorFunction());
+ CHECK(!genObj->IsFunction());
+ CHECK(genObj->IsGeneratorObject());
+
+ CHECK(!object->IsGeneratorFunction());
+ CHECK(!object->IsFunction());
+ CHECK(!object->IsGeneratorObject());
+
+ CHECK(!func->IsGeneratorFunction());
+ CHECK(func->IsFunction());
+ CHECK(!func->IsGeneratorObject());
+}
+
+
+THREADED_TEST(ArgumentsObject) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Handle<Value> arguments_object =
+ CompileRun("var out = 0; (function(){ out = arguments; })(1,2,3); out;");
+ CHECK(arguments_object->IsArgumentsObject());
+ v8::Handle<Value> array = CompileRun("[1,2,3]");
+ CHECK(!array->IsArgumentsObject());
+ v8::Handle<Value> object = CompileRun("{a:42}");
+ CHECK(!object->IsArgumentsObject());
+}
+
+
+THREADED_TEST(IsMapOrSet) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Handle<Value> map = CompileRun("new Map()");
+ v8::Handle<Value> set = CompileRun("new Set()");
+ v8::Handle<Value> weak_map = CompileRun("new WeakMap()");
+ v8::Handle<Value> weak_set = CompileRun("new WeakSet()");
+ CHECK(map->IsMap());
+ CHECK(set->IsSet());
+ CHECK(weak_map->IsWeakMap());
+ CHECK(weak_set->IsWeakSet());
+
+ CHECK(!map->IsSet());
+ CHECK(!map->IsWeakMap());
+ CHECK(!map->IsWeakSet());
+
+ CHECK(!set->IsMap());
+ CHECK(!set->IsWeakMap());
+ CHECK(!set->IsWeakSet());
+
+ CHECK(!weak_map->IsMap());
+ CHECK(!weak_map->IsSet());
+ CHECK(!weak_map->IsWeakSet());
+
+ CHECK(!weak_set->IsMap());
+ CHECK(!weak_set->IsSet());
+ CHECK(!weak_set->IsWeakMap());
+
+ v8::Handle<Value> object = CompileRun("{a:42}");
+ CHECK(!object->IsMap());
+ CHECK(!object->IsSet());
+ CHECK(!object->IsWeakMap());
+ CHECK(!object->IsWeakSet());
+}
+
+
THREADED_TEST(StringObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1878,6 +2003,24 @@ void SimpleAccessorSetter(Local<String> name, Local<Value> value,
self->Set(String::Concat(v8_str("accessor_"), name), value);
}
+void SymbolAccessorGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(name->IsSymbol());
+ Local<Symbol> sym = Local<Symbol>::Cast(name);
+ if (sym->Name()->IsUndefined())
+ return;
+ SimpleAccessorGetter(Local<String>::Cast(sym->Name()), info);
+}
+
+void SymbolAccessorSetter(Local<Name> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ CHECK(name->IsSymbol());
+ Local<Symbol> sym = Local<Symbol>::Cast(name);
+ if (sym->Name()->IsUndefined())
+ return;
+ SimpleAccessorSetter(Local<String>::Cast(sym->Name()), value, info);
+}
+
void EmptyInterceptorGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
}
@@ -1936,6 +2079,14 @@ void AddInterceptor(Handle<FunctionTemplate> templ,
}
+void AddAccessor(Handle<FunctionTemplate> templ,
+ Handle<Name> name,
+ v8::AccessorNameGetterCallback getter,
+ v8::AccessorNameSetterCallback setter) {
+ templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
@@ -1968,10 +2119,9 @@ THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::LookupResult lookup(i_isolate);
i::Handle<i::String> name(v8::Utils::OpenHandle(*v8_str("length")));
- a->LookupOwnRealNamedProperty(name, &lookup);
- CHECK(lookup.IsPropertyCallbacks());
- i::Handle<i::Object> callback(lookup.GetCallbackObject(), i_isolate);
- CHECK(callback->IsExecutableAccessorInfo());
+ i::LookupIterator it(a, name, i::LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_EQ(i::LookupIterator::ACCESSOR, it.state());
+ CHECK(it.GetAccessors()->IsExecutableAccessorInfo());
}
@@ -2747,6 +2897,8 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
v8::Local<v8::Symbol> sym2 =
v8::Symbol::New(isolate, v8_str("my-symbol"));
+ v8::Local<v8::Symbol> sym3 =
+ v8::Symbol::New(isolate, v8_str("sym3"));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -2802,31 +2954,62 @@ THREADED_TEST(SymbolProperties) {
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK(obj->SetAccessor(sym3, SymbolAccessorGetter, SymbolAccessorSetter));
+ CHECK(obj->Get(sym3)->IsUndefined());
+ CHECK(obj->Set(sym3, v8::Integer::New(isolate, 42)));
+ CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
+ CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
+ v8::Integer::New(isolate, 42)));
+
// Add another property and delete it afterwards to force the object in
// slow case.
CHECK(obj->Set(sym2, v8::Integer::New(isolate, 2008)));
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK_EQ(2008, obj->Get(sym2)->Int32Value());
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(2, obj->GetOwnPropertyNames()->Length());
CHECK(obj->Has(sym1));
CHECK(obj->Has(sym2));
+ CHECK(obj->Has(sym3));
+ CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
CHECK(obj->Delete(sym2));
CHECK(obj->Has(sym1));
CHECK(!obj->Has(sym2));
+ CHECK(obj->Has(sym3));
+ CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
+ CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
+ v8::Integer::New(isolate, 42)));
+ CHECK_EQ(2, obj->GetOwnPropertyNames()->Length());
// Symbol properties are inherited.
v8::Local<v8::Object> child = v8::Object::New(isolate);
child->SetPrototype(obj);
CHECK(child->Has(sym1));
CHECK_EQ(2002, child->Get(sym1)->Int32Value());
+ CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
+ CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
+ v8::Integer::New(isolate, 42)));
CHECK_EQ(0, child->GetOwnPropertyNames()->Length());
}
+THREADED_TEST(SymbolTemplateProperties) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::FunctionTemplate> foo = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::Name> name = v8::Symbol::New(isolate);
+ CHECK(!name.IsEmpty());
+ foo->PrototypeTemplate()->Set(name, v8::FunctionTemplate::New(isolate));
+ v8::Local<v8::Object> new_instance = foo->InstanceTemplate()->NewInstance();
+ CHECK(!new_instance.IsEmpty());
+ CHECK(new_instance->Has(name));
+}
+
+
THREADED_TEST(PrivateProperties) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -2911,6 +3094,29 @@ THREADED_TEST(GlobalSymbols) {
}
+static void CheckWellKnownSymbol(v8::Local<v8::Symbol>(*getter)(v8::Isolate*),
+ const char* name) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Symbol> symbol = getter(isolate);
+ std::string script = std::string("var sym = ") + name;
+ CompileRun(script.c_str());
+ v8::Local<Value> value = env->Global()->Get(v8_str("sym"));
+
+ CHECK(!value.IsEmpty());
+ CHECK(!symbol.IsEmpty());
+ CHECK(value->SameValue(symbol));
+}
+
+
+THREADED_TEST(WellKnownSymbols) {
+ CheckWellKnownSymbol(v8::Symbol::GetIterator, "Symbol.iterator");
+ CheckWellKnownSymbol(v8::Symbol::GetUnscopables, "Symbol.unscopables");
+}
+
+
THREADED_TEST(GlobalPrivates) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -9412,18 +9618,14 @@ TEST(AccessControlES5) {
}
-static bool GetOwnPropertyNamesNamedBlocker(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
+static bool BlockEverythingNamed(Local<v8::Object> object, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
return false;
}
-static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
+static bool BlockEverythingIndexed(Local<v8::Object> object, uint32_t key,
+ v8::AccessType type, Local<Value> data) {
return false;
}
@@ -9435,8 +9637,8 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
- obj_template->SetAccessCheckCallbacks(GetOwnPropertyNamesNamedBlocker,
- GetOwnPropertyNamesIndexedBlocker);
+ obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
+ BlockEverythingIndexed);
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
@@ -9471,6 +9673,50 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
}
+TEST(SuperAccessControl) {
+ i::FLAG_harmony_classes = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj_template =
+ v8::ObjectTemplate::New(isolate);
+ obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
+ BlockEverythingIndexed);
+ LocalContext env;
+ env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
+
+ {
+ v8::TryCatch try_catch;
+ CompileRun(
+ "function f() { return super.hasOwnProperty; };"
+ "var m = f.toMethod(prohibited);"
+ "m();");
+ CHECK(try_catch.HasCaught());
+ }
+
+ {
+ v8::TryCatch try_catch;
+ CompileRun(
+ "function f() { super.hasOwnProperty = function () {}; };"
+ "var m = f.toMethod(prohibited);"
+ "m();");
+ CHECK(try_catch.HasCaught());
+ }
+
+ {
+ v8::TryCatch try_catch;
+ CompileRun(
+ "Object.defineProperty(Object.prototype, 'x', { set : function(){}});"
+ "function f() { "
+ " 'use strict';"
+ " super.x = function () {}; "
+ "};"
+ "var m = f.toMethod(prohibited);"
+ "m();");
+ CHECK(try_catch.HasCaught());
+ }
+}
+
+
static void IndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
@@ -14040,19 +14286,14 @@ void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
void SetFunctionEntryHookTest::RunTest() {
// Work in a new isolate throughout.
- v8::Isolate* isolate = v8::Isolate::New();
-
- // Test setting the entry hook on the new isolate.
- CHECK(v8::V8::SetFunctionEntryHook(isolate, EntryHook));
-
- // Replacing the hook, once set should fail.
- CHECK_EQ(false, v8::V8::SetFunctionEntryHook(isolate, EntryHook));
+ v8::Isolate::CreateParams create_params;
+ create_params.entry_hook = EntryHook;
+ create_params.code_event_handler = JitEvent;
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope scope(isolate);
- v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, JitEvent);
-
RunLoopInNewEnv(isolate);
// Check the exepected invocation counts.
@@ -14080,9 +14321,6 @@ void SetFunctionEntryHookTest::RunTest() {
// We should record no invocations in this isolate.
CHECK_EQ(0, static_cast<int>(invocations_.size()));
}
- // Since the isolate has been used, we shouldn't be able to set an entry
- // hook anymore.
- CHECK_EQ(false, v8::V8::SetFunctionEntryHook(isolate, EntryHook));
isolate->Dispose();
}
@@ -14276,7 +14514,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
saw_bar = 0;
move_events = 0;
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
// Generate new code objects sparsely distributed across several
// different fragmented code-space pages.
@@ -14300,7 +14538,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// Force code movement.
heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
CHECK_LE(kIterations, saw_bar);
CHECK_LT(0, move_events);
@@ -14330,8 +14568,9 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::HashMap lineinfo(MatchPointers);
jitcode_line_info = &lineinfo;
- V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting, event_handler);
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
+ event_handler);
+ isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
jitcode_line_info = NULL;
// We expect that we got some events. Note that if we could get code removal
@@ -15003,11 +15242,11 @@ TEST(ObjectClone) {
}
-class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
public:
- explicit AsciiVectorResource(i::Vector<const char> vector)
+ explicit OneByteVectorResource(i::Vector<const char> vector)
: data_(vector) {}
- virtual ~AsciiVectorResource() {}
+ virtual ~OneByteVectorResource() {}
virtual size_t length() const { return data_.length(); }
virtual const char* data() const { return data_.start(); }
private:
@@ -15028,12 +15267,12 @@ class UC16VectorResource : public v8::String::ExternalStringResource {
static void MorphAString(i::String* string,
- AsciiVectorResource* ascii_resource,
+ OneByteVectorResource* one_byte_resource,
UC16VectorResource* uc16_resource) {
CHECK(i::StringShape(string).IsExternal());
if (string->IsOneByteRepresentation()) {
// Check old map is not internalized or long.
- CHECK(string->map() == CcTest::heap()->external_ascii_string_map());
+ CHECK(string->map() == CcTest::heap()->external_one_byte_string_map());
// Morph external string to be TwoByte string.
string->set_map(CcTest::heap()->external_string_map());
i::ExternalTwoByteString* morphed =
@@ -15042,11 +15281,10 @@ static void MorphAString(i::String* string,
} else {
// Check old map is not internalized or long.
CHECK(string->map() == CcTest::heap()->external_string_map());
- // Morph external string to be ASCII string.
- string->set_map(CcTest::heap()->external_ascii_string_map());
- i::ExternalAsciiString* morphed =
- i::ExternalAsciiString::cast(string);
- morphed->set_resource(ascii_resource);
+ // Morph external string to be one-byte string.
+ string->set_map(CcTest::heap()->external_one_byte_string_map());
+ i::ExternalOneByteString* morphed = i::ExternalOneByteString::cast(string);
+ morphed->set_resource(one_byte_resource);
}
}
@@ -15062,18 +15300,18 @@ THREADED_TEST(MorphCompositeStringTest) {
LocalContext env;
i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(env->GetIsolate());
- AsciiVectorResource ascii_resource(
+ OneByteVectorResource one_byte_resource(
i::Vector<const char>(c_string, i::StrLength(c_string)));
UC16VectorResource uc16_resource(
i::Vector<const uint16_t>(two_byte_string,
i::StrLength(c_string)));
- Local<String> lhs(v8::Utils::ToLocal(
- factory->NewExternalStringFromAscii(&ascii_resource)
- .ToHandleChecked()));
- Local<String> rhs(v8::Utils::ToLocal(
- factory->NewExternalStringFromAscii(&ascii_resource)
- .ToHandleChecked()));
+ Local<String> lhs(
+ v8::Utils::ToLocal(factory->NewExternalStringFromOneByte(
+ &one_byte_resource).ToHandleChecked()));
+ Local<String> rhs(
+ v8::Utils::ToLocal(factory->NewExternalStringFromOneByte(
+ &one_byte_resource).ToHandleChecked()));
env->Global()->Set(v8_str("lhs"), lhs);
env->Global()->Set(v8_str("rhs"), rhs);
@@ -15086,8 +15324,10 @@ THREADED_TEST(MorphCompositeStringTest) {
CHECK(lhs->IsOneByte());
CHECK(rhs->IsOneByte());
- MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource);
- MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource);
+ MorphAString(*v8::Utils::OpenHandle(*lhs), &one_byte_resource,
+ &uc16_resource);
+ MorphAString(*v8::Utils::OpenHandle(*rhs), &one_byte_resource,
+ &uc16_resource);
// This should UTF-8 without flattening, since everything is ASCII.
Handle<String> cons = v8_compile("cons")->Run().As<String>();
@@ -15130,16 +15370,15 @@ TEST(CompileExternalTwoByteSource) {
// This is a very short list of sources, which currently is to check for a
// regression caused by r2703.
- const char* ascii_sources[] = {
- "0.5",
- "-0.5", // This mainly testes PushBack in the Scanner.
- "--0.5", // This mainly testes PushBack in the Scanner.
- NULL
- };
+ const char* one_byte_sources[] = {
+ "0.5",
+ "-0.5", // This mainly testes PushBack in the Scanner.
+ "--0.5", // This mainly testes PushBack in the Scanner.
+ NULL};
// Compile the sources as external two byte strings.
- for (int i = 0; ascii_sources[i] != NULL; i++) {
- uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]);
+ for (int i = 0; one_byte_sources[i] != NULL; i++) {
+ uint16_t* two_byte_string = AsciiToTwoByteString(one_byte_sources[i]);
TestResource* uc16_resource = new TestResource(two_byte_string);
v8::Local<v8::String> source =
v8::String::NewExternal(context->GetIsolate(), uc16_resource);
@@ -15198,14 +15437,14 @@ TEST(RegExpInterruption) {
RegExpInterruptionThread timeout_thread(CcTest::isolate());
v8::V8::AddGCPrologueCallback(RunBeforeGC);
- static const char* ascii_content = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- i::uc16* uc16_content = AsciiToTwoByteString(ascii_content);
- v8::Local<v8::String> string = v8_str(ascii_content);
+ static const char* one_byte_content = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ i::uc16* uc16_content = AsciiToTwoByteString(one_byte_content);
+ v8::Local<v8::String> string = v8_str(one_byte_content);
CcTest::global()->Set(v8_str("a"), string);
regexp_interruption_data.string.Reset(CcTest::isolate(), string);
regexp_interruption_data.string_resource = new UC16VectorResource(
- i::Vector<const i::uc16>(uc16_content, i::StrLength(ascii_content)));
+ i::Vector<const i::uc16>(uc16_content, i::StrLength(one_byte_content)));
v8::TryCatch try_catch;
timeout_thread.Start();
@@ -17697,14 +17936,13 @@ TEST(IdleNotificationWithLargeHint) {
TEST(Regress2107) {
const intptr_t MB = 1024 * 1024;
- const int kShortIdlePauseInMs = 100;
- const int kLongIdlePauseInMs = 1000;
+ const int kIdlePauseInMs = 1000;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(env->GetIsolate());
intptr_t initial_size = CcTest::heap()->SizeOfObjects();
// Send idle notification to start a round of incremental GCs.
- env->GetIsolate()->IdleNotification(kShortIdlePauseInMs);
+ env->GetIsolate()->IdleNotification(kIdlePauseInMs);
// Emulate 7 page reloads.
for (int i = 0; i < 7; i++) {
{
@@ -17715,7 +17953,7 @@ TEST(Regress2107) {
ctx->Exit();
}
env->GetIsolate()->ContextDisposedNotification();
- env->GetIsolate()->IdleNotification(kLongIdlePauseInMs);
+ env->GetIsolate()->IdleNotification(kIdlePauseInMs);
}
// Create garbage and check that idle notification still collects it.
CreateGarbageInOldSpace();
@@ -17723,7 +17961,7 @@ TEST(Regress2107) {
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
- finished = env->GetIsolate()->IdleNotification(kShortIdlePauseInMs);
+ finished = env->GetIsolate()->IdleNotification(kIdlePauseInMs);
}
intptr_t final_size = CcTest::heap()->SizeOfObjects();
CHECK_LT(final_size, initial_size + 1);
@@ -17764,13 +18002,11 @@ static uint32_t* ComputeStackLimit(uint32_t size) {
static const int stack_breathing_room = 256 * i::KB;
-TEST(SetResourceConstraints) {
+TEST(SetStackLimit) {
uint32_t* set_limit = ComputeStackLimit(stack_breathing_room);
// Set stack limit.
- v8::ResourceConstraints constraints;
- constraints.set_stack_limit(set_limit);
- CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
+ CcTest::isolate()->SetStackLimit(reinterpret_cast<uintptr_t>(set_limit));
// Execute a script.
LocalContext env;
@@ -17785,16 +18021,14 @@ TEST(SetResourceConstraints) {
}
-TEST(SetResourceConstraintsInThread) {
+TEST(SetStackLimitInThread) {
uint32_t* set_limit;
{
v8::Locker locker(CcTest::isolate());
set_limit = ComputeStackLimit(stack_breathing_room);
// Set stack limit.
- v8::ResourceConstraints constraints;
- constraints.set_stack_limit(set_limit);
- CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
+ CcTest::isolate()->SetStackLimit(reinterpret_cast<uintptr_t>(set_limit));
// Execute a script.
v8::HandleScope scope(CcTest::isolate());
@@ -17837,7 +18071,7 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
virtual ~VisitorImpl() {}
virtual void VisitExternalString(v8::Handle<v8::String> string) {
if (!string->IsExternal()) {
- CHECK(string->IsExternalAscii());
+ CHECK(string->IsExternalOneByte());
return;
}
v8::String::ExternalStringResource* resource =
@@ -17894,12 +18128,12 @@ TEST(ExternalizeOldSpaceOneByteCons) {
CHECK(CcTest::heap()->old_pointer_space()->Contains(
*v8::Utils::OpenHandle(*cons)));
- TestAsciiResource* resource =
- new TestAsciiResource(i::StrDup("Romeo Montague Juliet Capulet"));
+ TestOneByteResource* resource =
+ new TestOneByteResource(i::StrDup("Romeo Montague Juliet Capulet"));
cons->MakeExternal(resource);
- CHECK(cons->IsExternalAscii());
- CHECK_EQ(resource, cons->GetExternalAsciiStringResource());
+ CHECK(cons->IsExternalOneByte());
+ CHECK_EQ(resource, cons->GetExternalOneByteStringResource());
String::Encoding encoding;
CHECK_EQ(resource, cons->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
@@ -17954,8 +18188,8 @@ TEST(ExternalStringCollectedAtTearDown) {
{ v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
const char* s = "One string to test them all, one string to find them.";
- TestAsciiResource* inscription =
- new TestAsciiResource(i::StrDup(s), &destroyed);
+ TestOneByteResource* inscription =
+ new TestOneByteResource(i::StrDup(s), &destroyed);
v8::Local<v8::String> ring = v8::String::NewExternal(isolate, inscription);
// Ring is still alive. Orcs are roaming freely across our lands.
CHECK_EQ(0, destroyed);
@@ -17976,8 +18210,8 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
v8::HandleScope handle_scope(isolate);
CompileRun("var ring = 'One string to test them all';");
const char* s = "One string to test them all";
- TestAsciiResource* inscription =
- new TestAsciiResource(i::StrDup(s), &destroyed);
+ TestOneByteResource* inscription =
+ new TestOneByteResource(i::StrDup(s), &destroyed);
v8::Local<v8::String> ring = CompileRun("ring")->ToString();
CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
ring->MakeExternal(inscription);
@@ -17998,8 +18232,8 @@ TEST(ExternalInternalizedStringCollectedAtGC) {
v8::HandleScope handle_scope(env->GetIsolate());
CompileRun("var ring = 'One string to test them all';");
const char* s = "One string to test them all";
- TestAsciiResource* inscription =
- new TestAsciiResource(i::StrDup(s), &destroyed);
+ TestOneByteResource* inscription =
+ new TestOneByteResource(i::StrDup(s), &destroyed);
v8::Local<v8::String> ring = CompileRun("ring")->ToString();
CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
ring->MakeExternal(inscription);
@@ -18942,7 +19176,7 @@ THREADED_TEST(TestEviction) {
}
-THREADED_TEST(TwoByteStringInAsciiCons) {
+THREADED_TEST(TwoByteStringInOneByteCons) {
// See Chromium issue 47824.
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -18980,10 +19214,10 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
// If the cons string has been short-circuited, skip the following checks.
if (!string.is_identical_to(flat_string)) {
- // At this point, we should have a Cons string which is flat and ASCII,
+ // At this point, we should have a Cons string which is flat and one-byte,
// with a first half that is a two-byte string (although it only contains
- // ASCII characters). This is a valid sequence of steps, and it can happen
- // in real pages.
+ // one-byte characters). This is a valid sequence of steps, and it can
+ // happen in real pages.
CHECK(string->IsOneByteRepresentation());
i::ConsString* cons = i::ConsString::cast(*string);
CHECK_EQ(0, cons->second()->length());
@@ -19071,7 +19305,7 @@ TEST(ContainsOnlyOneByte) {
String::NewExternal(isolate,
new TestResource(string_contents, NULL, false));
USE(two_byte); USE(cons_strings);
- for (size_t i = 0; i < ARRAY_SIZE(cons_strings); i++) {
+ for (size_t i = 0; i < arraysize(cons_strings); i++) {
// Base assumptions.
string = cons_strings[i];
CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
@@ -19367,31 +19601,26 @@ static int CalcFibonacci(v8::Isolate* isolate, int limit) {
class IsolateThread : public v8::base::Thread {
public:
- IsolateThread(v8::Isolate* isolate, int fib_limit)
- : Thread(Options("IsolateThread")),
- isolate_(isolate),
- fib_limit_(fib_limit),
- result_(0) {}
+ explicit IsolateThread(int fib_limit)
+ : Thread(Options("IsolateThread")), fib_limit_(fib_limit), result_(0) {}
void Run() {
- result_ = CalcFibonacci(isolate_, fib_limit_);
+ v8::Isolate* isolate = v8::Isolate::New();
+ result_ = CalcFibonacci(isolate, fib_limit_);
+ isolate->Dispose();
}
int result() { return result_; }
private:
- v8::Isolate* isolate_;
int fib_limit_;
int result_;
};
TEST(MultipleIsolatesOnIndividualThreads) {
- v8::Isolate* isolate1 = v8::Isolate::New();
- v8::Isolate* isolate2 = v8::Isolate::New();
-
- IsolateThread thread1(isolate1, 21);
- IsolateThread thread2(isolate2, 12);
+ IsolateThread thread1(21);
+ IsolateThread thread2(12);
// Compute some fibonacci numbers on 3 threads in 3 isolates.
thread1.Start();
@@ -19409,9 +19638,6 @@ TEST(MultipleIsolatesOnIndividualThreads) {
CHECK_EQ(result2, 144);
CHECK_EQ(result1, thread1.result());
CHECK_EQ(result2, thread2.result());
-
- isolate1->Dispose();
- isolate2->Dispose();
}
@@ -19455,16 +19681,22 @@ class InitDefaultIsolateThread : public v8::base::Thread {
result_(false) {}
void Run() {
- v8::Isolate* isolate = v8::Isolate::New();
- isolate->Enter();
+ v8::Isolate::CreateParams create_params;
switch (testCase_) {
case SetResourceConstraints: {
- v8::ResourceConstraints constraints;
- constraints.set_max_semi_space_size(1);
- constraints.set_max_old_space_size(4);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
+ create_params.constraints.set_max_semi_space_size(1);
+ create_params.constraints.set_max_old_space_size(4);
break;
}
+ default:
+ break;
+ }
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ isolate->Enter();
+ switch (testCase_) {
+ case SetResourceConstraints:
+ // Already handled in pre-Isolate-creation block.
+ break;
case SetFatalHandler:
v8::V8::SetFatalErrorHandler(NULL);
@@ -21490,7 +21722,7 @@ THREADED_TEST(JSONParseNumber) {
}
-#if V8_OS_POSIX
+#if V8_OS_POSIX && !V8_OS_NACL
class ThreadInterruptTest {
public:
ThreadInterruptTest() : sem_(0), sem_value_(0) { }
@@ -21689,7 +21921,6 @@ TEST(AccessCheckThrows) {
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
- context0->Global()->Set(v8_str("x"), v8_num(42));
v8::Handle<v8::Object> global0 = context0->Global();
// Create a context with a different security token so that the
@@ -22287,12 +22518,12 @@ class ApiCallOptimizationChecker {
void RunAll() {
SignatureType signature_types[] =
{kNoSignature, kSignatureOnReceiver, kSignatureOnPrototype};
- for (unsigned i = 0; i < ARRAY_SIZE(signature_types); i++) {
+ for (unsigned i = 0; i < arraysize(signature_types); i++) {
SignatureType signature_type = signature_types[i];
for (int j = 0; j < 2; j++) {
bool global = j == 0;
int key = signature_type +
- ARRAY_SIZE(signature_types) * (global ? 1 : 0);
+ arraysize(signature_types) * (global ? 1 : 0);
Run(signature_type, global, key);
}
}
@@ -22736,32 +22967,6 @@ TEST(ScriptNameAndLineNumber) {
}
-Local<v8::Context> call_eval_context;
-Local<v8::Function> call_eval_bound_function;
-static void CallEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Context::Scope scope(call_eval_context);
- args.GetReturnValue().Set(
- call_eval_bound_function->Call(call_eval_context->Global(), 0, NULL));
-}
-
-
-TEST(CrossActivationEval) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- {
- call_eval_context = v8::Context::New(isolate);
- v8::Context::Scope scope(call_eval_context);
- call_eval_bound_function =
- Local<Function>::Cast(CompileRun("eval.bind(this, '1')"));
- }
- env->Global()->Set(v8_str("CallEval"),
- v8::FunctionTemplate::New(isolate, CallEval)->GetFunction());
- Local<Value> result = CompileRun("CallEval();");
- CHECK_EQ(result, v8::Integer::New(isolate, 1));
-}
-
-
void SourceURLHelper(const char* source, const char* expected_source_url,
const char* expected_source_mapping_url) {
Local<Script> script = v8_compile(source);
@@ -22870,3 +23075,381 @@ TEST(GetOwnPropertyDescriptor) {
set->Call(x, 1, args);
CHECK_EQ(v8_num(14), get->Call(x, 0, NULL));
}
+
+
+TEST(Regress411877) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetAccessCheckCallbacks(NamedAccessCounter,
+ IndexedAccessCounter);
+
+ v8::Handle<Context> context = Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ context->Global()->Set(v8_str("o"), object_template->NewInstance());
+ CompileRun("Object.getOwnPropertyNames(o)");
+}
+
+
+TEST(GetHiddenPropertyTableAfterAccessCheck) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetAccessCheckCallbacks(NamedAccessCounter,
+ IndexedAccessCounter);
+
+ v8::Handle<Context> context = Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::Handle<v8::Object> obj = object_template->NewInstance();
+ obj->Set(v8_str("key"), v8_str("value"));
+ obj->Delete(v8_str("key"));
+
+ obj->SetHiddenValue(v8_str("hidden key 2"), v8_str("hidden value 2"));
+}
+
+
+TEST(Regress411793) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetAccessCheckCallbacks(NamedAccessCounter,
+ IndexedAccessCounter);
+
+ v8::Handle<Context> context = Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ context->Global()->Set(v8_str("o"), object_template->NewInstance());
+ CompileRun(
+ "Object.defineProperty(o, 'key', "
+ " { get: function() {}, set: function() {} });");
+}
+
+class TestSourceStream : public v8::ScriptCompiler::ExternalSourceStream {
+ public:
+ explicit TestSourceStream(const char** chunks) : chunks_(chunks), index_(0) {}
+
+ virtual size_t GetMoreData(const uint8_t** src) {
+ // Unlike in real use cases, this function will never block.
+ if (chunks_[index_] == NULL) {
+ return 0;
+ }
+ // Copy the data, since the caller takes ownership of it.
+ size_t len = strlen(chunks_[index_]);
+ // We don't need to zero-terminate since we return the length.
+ uint8_t* copy = new uint8_t[len];
+ memcpy(copy, chunks_[index_], len);
+ *src = copy;
+ ++index_;
+ return len;
+ }
+
+ // Helper for constructing a string from chunks (the compilation needs it
+ // too).
+ static char* FullSourceString(const char** chunks) {
+ size_t total_len = 0;
+ for (size_t i = 0; chunks[i] != NULL; ++i) {
+ total_len += strlen(chunks[i]);
+ }
+ char* full_string = new char[total_len + 1];
+ size_t offset = 0;
+ for (size_t i = 0; chunks[i] != NULL; ++i) {
+ size_t len = strlen(chunks[i]);
+ memcpy(full_string + offset, chunks[i], len);
+ offset += len;
+ }
+ full_string[total_len] = 0;
+ return full_string;
+ }
+
+ private:
+ const char** chunks_;
+ unsigned index_;
+};
+
+
+// Helper function for running streaming tests.
+void RunStreamingTest(const char** chunks,
+ v8::ScriptCompiler::StreamedSource::Encoding encoding =
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE,
+ bool expected_success = true) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch;
+
+ v8::ScriptCompiler::StreamedSource source(new TestSourceStream(chunks),
+ encoding);
+ v8::ScriptCompiler::ScriptStreamingTask* task =
+ v8::ScriptCompiler::StartStreamingScript(isolate, &source);
+
+ // TestSourceStream::GetMoreData won't block, so it's OK to just run the
+ // task here in the main thread.
+ task->Run();
+ delete task;
+
+ v8::ScriptOrigin origin(v8_str("http://foo.com"));
+ char* full_source = TestSourceStream::FullSourceString(chunks);
+
+ // The possible errors are only produced while compiling.
+ CHECK_EQ(false, try_catch.HasCaught());
+
+ v8::Handle<Script> script = v8::ScriptCompiler::Compile(
+ isolate, &source, v8_str(full_source), origin);
+ if (expected_success) {
+ CHECK(!script.IsEmpty());
+ v8::Handle<Value> result(script->Run());
+ // All scripts are supposed to return the fixed value 13 when ran.
+ CHECK_EQ(13, result->Int32Value());
+ } else {
+ CHECK(script.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ delete[] full_source;
+}
+
+
+TEST(StreamingSimpleScript) {
+ // This script is unrealistically small, since no one chunk is enough to fill
+ // the backing buffer of Scanner, let alone overflow it.
+ const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
+ NULL};
+ RunStreamingTest(chunks);
+}
+
+
+TEST(StreamingBiggerScript) {
+ const char* chunk1 =
+ "function foo() {\n"
+ " // Make this chunk sufficiently long so that it will overflow the\n"
+ " // backing buffer of the Scanner.\n"
+ " var i = 0;\n"
+ " var result = 0;\n"
+ " for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+ " result = 0;\n"
+ " for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+ " result = 0;\n"
+ " for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+ " result = 0;\n"
+ " for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+ " return result;\n"
+ "}\n";
+ const char* chunks[] = {chunk1, "foo(); ", NULL};
+ RunStreamingTest(chunks);
+}
+
+
+TEST(StreamingScriptWithParseError) {
+ // Test that parse errors from streamed scripts are propagated correctly.
+ {
+ char chunk1[] =
+ " // This will result in a parse error.\n"
+ " var if else then foo";
+ char chunk2[] = " 13\n";
+ const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE,
+ false);
+ }
+ // Test that the next script succeeds normally.
+ {
+ char chunk1[] =
+ " // This will be parsed successfully.\n"
+ " function foo() { return ";
+ char chunk2[] = " 13; }\n";
+ const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+
+ RunStreamingTest(chunks);
+ }
+}
+
+
+TEST(StreamingUtf8Script) {
+ // We'd want to write \uc481 instead of \xeb\x91\x80, but Windows compilers
+ // don't like it.
+ const char* chunk1 =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foob\xeb\x91\x80r = 13;\n"
+ " return foob\xeb\x91\x80r;\n"
+ "}\n";
+ const char* chunks[] = {chunk1, "foo(); ", NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharactersSanityCheck) {
+ // A sanity check to prove that the approach of splitting UTF-8
+ // characters is correct. Here is an UTF-8 character which will take three
+ // bytes.
+ const char* reference = "\xeb\x91\x80";
+ CHECK(3u == strlen(reference)); // NOLINT - no CHECK_EQ for unsigned.
+
+ char chunk1[] =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foob";
+ char chunk2[] =
+ "XXXr = 13;\n"
+ " return foob\xeb\x91\x80r;\n"
+ "}\n";
+ for (int i = 0; i < 3; ++i) {
+ chunk2[i] = reference[i];
+ }
+ const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharacters) {
+ // Stream data where a multi-byte UTF-8 character is split between two data
+ // chunks.
+ const char* reference = "\xeb\x91\x80";
+ char chunk1[] =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foobX";
+ char chunk2[] =
+ "XXr = 13;\n"
+ " return foob\xeb\x91\x80r;\n"
+ "}\n";
+ chunk1[strlen(chunk1) - 1] = reference[0];
+ chunk2[0] = reference[1];
+ chunk2[1] = reference[2];
+ const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
+ // Tests edge cases which should still be decoded correctly.
+
+ // Case 1: a chunk contains only bytes for a split character (and no other
+ // data). This kind of a chunk would be exceptionally small, but we should
+ // still decode it correctly.
+ const char* reference = "\xeb\x91\x80";
+ // The small chunk is at the beginning of the split character
+ {
+ char chunk1[] =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foob";
+ char chunk2[] = "XX";
+ char chunk3[] =
+ "Xr = 13;\n"
+ " return foob\xeb\x91\x80r;\n"
+ "}\n";
+ chunk2[0] = reference[0];
+ chunk2[1] = reference[1];
+ chunk3[0] = reference[2];
+ const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+ }
+ // The small chunk is at the end of a character
+ {
+ char chunk1[] =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foobX";
+ char chunk2[] = "XX";
+ char chunk3[] =
+ "r = 13;\n"
+ " return foob\xeb\x91\x80r;\n"
+ "}\n";
+ chunk1[strlen(chunk1) - 1] = reference[0];
+ chunk2[0] = reference[1];
+ chunk2[1] = reference[2];
+ const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+ }
+ // Case 2: the script ends with a multi-byte character. Make sure that it's
+ // decoded correctly and not just ignored.
+ {
+ char chunk1[] =
+ "var foob\xeb\x91\x80 = 13;\n"
+ "foob\xeb\x91\x80";
+ const char* chunks[] = {chunk1, NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+ }
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
+ // Test cases where a UTF-8 character is split over several chunks. Those
+ // cases are not supported (the embedder should give the data in big enough
+ // chunks), but we shouldn't crash, just produce a parse error.
+ const char* reference = "\xeb\x91\x80";
+ char chunk1[] =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foobX";
+ char chunk2[] = "X";
+ char chunk3[] =
+ "Xr = 13;\n"
+ " return foob\xeb\x91\x80r;\n"
+ "}\n";
+ chunk1[strlen(chunk1) - 1] = reference[0];
+ chunk2[0] = reference[1];
+ chunk3[0] = reference[2];
+ const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
+}
+
+
+TEST(StreamingProducesParserCache) {
+ i::FLAG_min_preparse_length = 0;
+ const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
+ NULL};
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::ScriptCompiler::StreamedSource source(
+ new TestSourceStream(chunks),
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE);
+ v8::ScriptCompiler::ScriptStreamingTask* task =
+ v8::ScriptCompiler::StartStreamingScript(
+ isolate, &source, v8::ScriptCompiler::kProduceParserCache);
+
+ // TestSourceStream::GetMoreData won't block, so it's OK to just run the
+ // task here in the main thread.
+ task->Run();
+ delete task;
+
+ const v8::ScriptCompiler::CachedData* cached_data = source.GetCachedData();
+ CHECK(cached_data != NULL);
+ CHECK(cached_data->data != NULL);
+ CHECK_GT(cached_data->length, 0);
+}
+
+
+TEST(StreamingScriptWithInvalidUtf8) {
+ // Regression test for a crash: test that invalid UTF-8 bytes in the end of a
+ // chunk don't produce a crash.
+ const char* reference = "\xeb\x91\x80\x80\x80";
+ char chunk1[] =
+ "function foo() {\n"
+ " // This function will contain an UTF-8 character which is not in\n"
+ " // ASCII.\n"
+ " var foobXXXXX"; // Too many bytes which look like incomplete chars!
+ char chunk2[] =
+ "r = 13;\n"
+ " return foob\xeb\x91\x80\x80\x80r;\n"
+ "}\n";
+ for (int i = 0; i < 5; ++i) chunk1[strlen(chunk1) - 5 + i] = reference[i];
+
+ const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 4c339a32b4..ed9563d04b 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1182,7 +1182,7 @@ TEST(14) {
code->Print(os);
#endif
F3 f = FUNCTION_CAST<F3>(code->entry());
- t.left = BitCast<double>(kHoleNanInt64);
+ t.left = bit_cast<double>(kHoleNanInt64);
t.right = 1;
t.add_result = 0;
t.sub_result = 0;
@@ -1199,14 +1199,18 @@ TEST(14) {
#endif
// With VFP2 the sign of the canonicalized Nan is undefined. So
// we remove the sign bit for the upper tests.
- CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.add_result) & 0xffffffffu);
- CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.sub_result) & 0xffffffffu);
- CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.mul_result) & 0xffffffffu);
- CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32,
+ (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32,
+ (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32,
+ (bit_cast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.mul_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32,
+ (bit_cast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.div_result) & 0xffffffffu);
}
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 3d05487f39..587a4ce971 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -10270,58 +10270,6 @@ TEST(copyfields) {
}
-static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
- SETUP();
-
- START();
- Label end, slow;
- __ Mov(x2, 0xc001c0de);
- __ Mov(x1, value);
- __ SmiTag(x1);
- __ SmiAbs(x1, &slow);
- __ SmiUntag(x1);
- __ B(&end);
-
- __ Bind(&slow);
- __ Mov(x2, 0xbad);
-
- __ Bind(&end);
- END();
-
- RUN();
-
- if (must_fail) {
- // We tested an invalid conversion. The code must have jump on slow.
- CHECK_EQUAL_64(0xbad, x2);
- } else {
- // The conversion is valid, check the result.
- int32_t result = (value >= 0) ? value : -value;
- CHECK_EQUAL_64(result, x1);
-
- // Check that we didn't jump on slow.
- CHECK_EQUAL_64(0xc001c0de, x2);
- }
-
- TEARDOWN();
-}
-
-
-TEST(smi_abs) {
- INIT_V8();
- // Simple and edge cases.
- DoSmiAbsTest(0);
- DoSmiAbsTest(0x12345);
- DoSmiAbsTest(0x40000000);
- DoSmiAbsTest(0x7fffffff);
- DoSmiAbsTest(-1);
- DoSmiAbsTest(-12345);
- DoSmiAbsTest(0x80000001);
-
- // Check that the most negative SMI is detected.
- DoSmiAbsTest(0x80000000, true);
-}
-
-
TEST(blr_lr) {
// A simple test to check that the simulator correcty handle "blr lr".
INIT_V8();
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index e8c7f951fe..d943297393 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -170,11 +170,10 @@ TEST(AssemblerIa323) {
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- // don't print the code - our disassembler can't handle cvttss2si
- // instead print bytes
- Disassembler::Dump(stdout,
- code->instruction_start(),
- code->instruction_start() + code->instruction_size());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
F3 f = FUNCTION_CAST<F3>(code->entry());
int res = f(static_cast<float>(-3.1415));
::printf("f() = %d\n", res);
@@ -200,11 +199,10 @@ TEST(AssemblerIa324) {
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- // don't print the code - our disassembler can't handle cvttsd2si
- // instead print bytes
- Disassembler::Dump(stdout,
- code->instruction_start(),
- code->instruction_start() + code->instruction_size());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
F4 f = FUNCTION_CAST<F4>(code->entry());
int res = f(2.718281828);
::printf("f() = %d\n", res);
@@ -261,13 +259,9 @@ TEST(AssemblerIa326) {
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-#ifdef DEBUG
- ::printf("\n---\n");
- // don't print the code - our disassembler can't handle SSE instructions
- // instead print bytes
- Disassembler::Dump(stdout,
- code->instruction_start(),
- code->instruction_start() + code->instruction_size());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
#endif
F5 f = FUNCTION_CAST<F5>(code->entry());
double res = f(2.2, 1.1);
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index cd1d5d6cc7..74dcc3a0a2 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -170,7 +170,7 @@ TEST(MIPS2) {
__ Branch(&error, ne, v0, Operand(0x1));
__ nop();
__ sltu(v0, t7, t3);
- __ Branch(&error, ne, v0, Operand(0x0));
+ __ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
// End of SPECIAL class.
@@ -185,7 +185,7 @@ TEST(MIPS2) {
__ slti(v0, t1, 0x00002000); // 0x1
__ slti(v0, v0, 0xffff8000); // 0x0
- __ Branch(&error, ne, v0, Operand(0x0));
+ __ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
__ sltiu(v0, t1, 0x00002000); // 0x1
__ sltiu(v0, v0, 0x00008000); // 0x1
@@ -293,7 +293,7 @@ TEST(MIPS3) {
__ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
// g = sqrt(f) = 10.97451593465515908537
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
__ madd_d(f14, f6, f4, f6);
@@ -325,7 +325,7 @@ TEST(MIPS3) {
CHECK_EQ(1.8066e16, t.e);
CHECK_EQ(120.44, t.f);
CHECK_EQ(10.97451593465515908537, t.g);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
CHECK_EQ(6.875, t.h);
}
}
@@ -351,16 +351,28 @@ TEST(MIPS4) {
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
// Swap f4 and f6, by using four integer registers, t0-t3.
- __ mfc1(t0, f4);
- __ mfc1(t1, f5);
- __ mfc1(t2, f6);
- __ mfc1(t3, f7);
-
- __ mtc1(t0, f6);
- __ mtc1(t1, f7);
- __ mtc1(t2, f4);
- __ mtc1(t3, f5);
-
+ if (!IsFp64Mode()) {
+ __ mfc1(t0, f4);
+ __ mfc1(t1, f5);
+ __ mfc1(t2, f6);
+ __ mfc1(t3, f7);
+
+ __ mtc1(t0, f6);
+ __ mtc1(t1, f7);
+ __ mtc1(t2, f4);
+ __ mtc1(t3, f5);
+ } else {
+ DCHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
+ __ mfc1(t0, f4);
+ __ mfhc1(t1, f4);
+ __ mfc1(t2, f6);
+ __ mfhc1(t3, f6);
+
+ __ mtc1(t0, f6);
+ __ mthc1(t1, f6);
+ __ mtc1(t2, f4);
+ __ mthc1(t3, f4);
+ }
// Store the swapped f4 and f5 back to memory.
__ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
@@ -554,21 +566,30 @@ TEST(MIPS7) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ if (!IsMipsArchVariant(kMips32r6)) {
__ c(UN, D, f4, f6);
__ bc1f(&neither_is_nan);
+ } else {
+ __ cmp(UN, L, f2, f4, f6);
+ __ bc1eqz(&neither_is_nan, f2);
+ }
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
__ bind(&neither_is_nan);
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
__ c(OLT, D, f6, f4);
__ bc1t(&less_than);
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp(OLT, L, f2, f6, f4);
+ __ bc1nez(&less_than, f2);
} else {
__ c(OLT, D, f6, f4, 2);
__ bc1t(&less_than, 2);
}
+
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
@@ -716,7 +737,7 @@ TEST(MIPS9) {
MacroAssembler assm(isolate, NULL, 0);
Label exit, exit2, exit3;
- __ Branch(&exit, ge, a0, Operand(0x00000000));
+ __ Branch(&exit, ge, a0, Operand(zero_reg));
__ Branch(&exit2, ge, a0, Operand(0x00001FFF));
__ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
@@ -753,50 +774,52 @@ TEST(MIPS10) {
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (kArchVariant == kMips32r2) {
- // Load all structure elements to registers.
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
-
- // Save the raw bits of the double.
- __ mfc1(t0, f0);
- __ mfc1(t1, f1);
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
-
- // Convert double in f0 to long, save hi/lo parts.
- __ cvt_w_d(f0, f0);
- __ mfc1(t0, f0); // f0 has a 32-bits word.
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
-
- // Convert the b long integers to double b.
- __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
- __ mtc1(t0, f8); // f8 has a 32-bits word.
- __ cvt_d_w(f10, f8);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
-
- __ jr(ra);
- __ nop();
-
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
- t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
-
- CHECK_EQ(0x41DFFFFF, t.dbl_exp);
- CHECK_EQ(0xFF800000, t.dbl_mant);
- CHECK_EQ(0X7FFFFFFE, t.word);
- // 0x0FF00FF0 -> 2.6739096+e08
- CHECK_EQ(2.6739096e08, t.b);
- }
+ if (!IsMipsArchVariant(kMips32r2)) return;
+
+ // Load all structure elements to registers.
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
+
+ // Save the raw bits of the double.
+ __ mfc1(t0, f0);
+ __ mfc1(t1, f1);
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
+
+ // Convert double in f0 to long, save hi/lo parts.
+ __ cvt_w_d(f0, f0);
+ __ mfc1(t0, f0); // f0 has a 32-bits word.
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
+
+ // Convert the b long integers to double b.
+ __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
+ __ mtc1(t0, f8); // f8 has a 32-bits word.
+ __ cvt_d_w(f10, f8);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
+ t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0x41DFFFFF, t.dbl_exp);
+ CHECK_EQ(0xFF800000, t.dbl_mant);
+ CHECK_EQ(0X7FFFFFFE, t.word);
+ // 0x0FF00FF0 -> 2.6739096+e08
+ CHECK_EQ(2.6739096e08, t.b);
}
TEST(MIPS11) {
+ // Do not run test on MIPS32r6, as these instructions are removed.
+ if (IsMipsArchVariant(kMips32r6)) return;
// Test LWL, LWR, SWL and SWR instructions.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 4e9238930a..1ec9a65c96 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -353,14 +353,17 @@ TEST(MIPS4) {
double a;
double b;
double c;
+ double d;
+ int64_t high;
+ int64_t low;
} T;
T t;
Assembler assm(isolate, NULL, 0);
Label L, C;
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
+ __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)));
// Swap f4 and f5, by using 3 integer registers, a4-a6,
// both two 32-bit chunks, and one 64-bit chunk.
@@ -375,8 +378,16 @@ TEST(MIPS4) {
__ dmtc1(a6, f4);
// Store the swapped f4 and f5 back to memory.
- __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)) );
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
+ __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)));
+
+ // Test sign extension of move operations from coprocessor.
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, d)));
+ __ mfhc1(a4, f4);
+ __ mfc1(a5, f4);
+
+ __ sd(a4, MemOperand(a0, OFFSET_OF(T, high)));
+ __ sd(a5, MemOperand(a0, OFFSET_OF(T, low)));
__ jr(ra);
__ nop();
@@ -389,12 +400,15 @@ TEST(MIPS4) {
t.a = 1.5e22;
t.b = 2.75e11;
t.c = 17.17;
+ t.d = -2.75e11;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(2.75e11, t.a);
CHECK_EQ(2.75e11, t.b);
CHECK_EQ(1.5e22, t.c);
+ CHECK_EQ(0xffffffffc25001d1L, t.high);
+ CHECK_EQ(0xffffffffbf800000L, t.low);
}
@@ -870,80 +884,80 @@ TEST(MIPS11) {
Assembler assm(isolate, NULL, 0);
// Test all combinations of LWL and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)));
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)));
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) );
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)));
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)));
// Test all combinations of LWR and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)));
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)));
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
__ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
__ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
// Test all combinations of SWL and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
-
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) );
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) );
-
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) );
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) );
-
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) );
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)));
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1));
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)));
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2));
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)));
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3));
// Test all combinations of SWR and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
-
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) );
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) );
-
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) );
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) );
-
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) );
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)));
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1));
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)));
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2));
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)));
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+ __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3));
__ jr(ra);
__ nop();
@@ -1001,8 +1015,8 @@ TEST(MIPS12) {
__ mov(t2, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)) );
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)));
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)));
__ addu(a5, a4, a7);
__ subu(t0, a4, a7);
@@ -1020,30 +1034,30 @@ TEST(MIPS12) {
__ push(a7);
__ pop(t0);
__ nop();
- __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
- __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
+ __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)));
__ nop();
- __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
- __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
+ __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
__ nop();
__ push(a5);
- __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
__ pop(a5);
__ nop();
__ push(a5);
- __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
__ pop(a5);
__ nop();
__ push(a5);
- __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
__ pop(a6);
__ nop();
__ push(a6);
- __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
__ pop(a5);
__ nop();
__ push(a5);
- __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
+ __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
__ pop(a7);
__ nop();
@@ -1297,48 +1311,48 @@ TEST(MIPS16) {
Label L, C;
// Basic 32-bit word load/store, with un-signed data.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)));
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)));
// Check that the data got zero-extended into 64-bit a4.
- __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)) );
+ __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)));
// Basic 32-bit word load/store, with SIGNED data.
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)));
// Check that the data got sign-extended into 64-bit a4.
- __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)) );
+ __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)));
// 32-bit UNSIGNED word load/store, with SIGNED data.
- __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)) );
+ __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)));
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)));
// Check that the data got zero-extended into 64-bit a4.
- __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)) );
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)));
// lh with positive data.
- __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
+ __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)));
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)));
// lh with negative data.
- __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
+ __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)));
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)));
// lhu with negative data.
- __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
+ __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)));
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)));
// lb with negative data.
- __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
+ __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)));
// // sh writes only 1/2 of word.
__ lui(t1, 0x3333);
__ ori(t1, t1, 0x3333);
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
- __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
- __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)));
+ __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)));
+ __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)));
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index a25ae69b61..24819dfcd4 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -35,13 +35,13 @@
using namespace v8::internal;
TEST(List) {
- v8::internal::V8::Initialize(NULL);
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate);
- AstNodeFactory<AstNullVisitor> factory(&zone, NULL);
+ AstNode::IdGen id_gen;
+ AstNodeFactory<AstNullVisitor> factory(&zone, NULL, &id_gen);
AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
list->Add(node);
CHECK_EQ(1, list->length());
diff --git a/deps/v8/test/cctest/test-checks.cc b/deps/v8/test/cctest/test-checks.cc
index a49a7dbe2a..79e87ddd0c 100644
--- a/deps/v8/test/cctest/test-checks.cc
+++ b/deps/v8/test/cctest/test-checks.cc
@@ -20,7 +20,7 @@ TEST(CheckEqualsReflexivity) {
double nan = v8::base::OS::nan_value();
double constants[] = {-nan, -inf, -3.1415, -1.0, -0.1, -0.0,
0.0, 0.1, 1.0, 3.1415, inf, nan};
- for (size_t i = 0; i < ARRAY_SIZE(constants); ++i) {
+ for (size_t i = 0; i < arraysize(constants); ++i) {
CHECK_EQ(constants[i], constants[i]);
}
}
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index 0784aac78e..95035aab0b 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -62,7 +62,7 @@ int STDCALL ConvertDToICVersion(double d) {
}
} else {
uint64_t big_result =
- (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
+ (bit_cast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
result = static_cast<uint32_t>(big_result);
}
@@ -172,3 +172,19 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
#undef NaN
#undef Infinity
#undef RunOneTruncationTest
+
+
+TEST(CodeStubMajorKeys) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+
+#define CHECK_STUB(NAME) \
+ { \
+ HandleScope scope(isolate); \
+ NAME##Stub stub_impl(0xabcd, isolate); \
+ CodeStub* stub = &stub_impl; \
+ CHECK_EQ(stub->MajorKey(), CodeStub::NAME); \
+ }
+ CODE_STUB_LIST(CHECK_STUB);
+}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 2d913715e1..4d6e005a8b 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -227,18 +227,18 @@ TEST(C2JSFrames) {
Handle<JSObject> global(isolate->context()->global_object());
Execution::Call(isolate, fun0, global, 0, NULL).Check();
- Handle<String> foo_string = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("foo"));
+ Handle<String> foo_string =
+ isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("foo"));
Handle<Object> fun1 = Object::GetProperty(
isolate->global_object(), foo_string).ToHandleChecked();
CHECK(fun1->IsJSFunction());
- Handle<Object> argv[] = { isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("hello")) };
+ Handle<Object> argv[] = {isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("hello"))};
Execution::Call(isolate,
Handle<JSFunction>::cast(fun1),
global,
- ARRAY_SIZE(argv),
+ arraysize(argv),
argv).Check();
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 6051c3fd7f..8d429d2e21 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -469,7 +469,7 @@ static const v8::CpuProfileNode* GetChild(v8::Isolate* isolate,
const v8::CpuProfileNode* result = FindChild(isolate, node, name);
if (!result) {
char buffer[100];
- i::SNPrintF(Vector<char>(buffer, ARRAY_SIZE(buffer)),
+ i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
"Failed to GetChild: %s", name);
FATAL(buffer);
}
@@ -552,8 +552,8 @@ TEST(CollectCpuProfile) {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
- function->Call(env->Global(), ARRAY_SIZE(args), args);
+ RunProfiler(env.local(), function, args, arraysize(args), 200);
+ function->Call(env->Global(), arraysize(args), args);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
@@ -575,13 +575,13 @@ TEST(CollectCpuProfile) {
const char* barBranch[] = { "bar", "delay", "loop" };
CheckSimpleBranch(env->GetIsolate(), fooNode, barBranch,
- ARRAY_SIZE(barBranch));
+ arraysize(barBranch));
const char* bazBranch[] = { "baz", "delay", "loop" };
CheckSimpleBranch(env->GetIsolate(), fooNode, bazBranch,
- ARRAY_SIZE(bazBranch));
+ arraysize(bazBranch));
const char* delayBranch[] = { "delay", "loop" };
CheckSimpleBranch(env->GetIsolate(), fooNode, delayBranch,
- ARRAY_SIZE(delayBranch));
+ arraysize(delayBranch));
profile->Delete();
}
@@ -630,8 +630,8 @@ TEST(HotDeoptNoFrameEntry) {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
- function->Call(env->Global(), ARRAY_SIZE(args), args);
+ RunProfiler(env.local(), function, args, arraysize(args), 200);
+ function->Call(env->Global(), arraysize(args), args);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
@@ -667,7 +667,7 @@ TEST(CollectCpuProfileSamples) {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200, true);
+ RunProfiler(env.local(), function, args, arraysize(args), 200, true);
CHECK_LE(200, profile->GetSamplesCount());
uint64_t end_time = profile->GetEndTime();
@@ -723,7 +723,7 @@ TEST(SampleWhenFrameIsNotSetup) {
v8::Integer::New(env->GetIsolate(), repeat_count)
};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
@@ -843,7 +843,7 @@ TEST(NativeAccessorUninitializedIC) {
int32_t repeat_count = 1;
v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 180);
+ RunProfiler(env.local(), function, args, arraysize(args), 180);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* startNode =
@@ -893,14 +893,14 @@ TEST(NativeAccessorMonomorphicIC) {
v8::Handle<v8::Value> args[] = {
v8::Integer::New(isolate, warm_up_iterations)
};
- function->Call(env->Global(), ARRAY_SIZE(args), args);
+ function->Call(env->Global(), arraysize(args), args);
accessors.set_warming_up(false);
}
int32_t repeat_count = 100;
v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
+ RunProfiler(env.local(), function, args, arraysize(args), 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* startNode =
@@ -954,7 +954,7 @@ TEST(NativeMethodUninitializedIC) {
int32_t repeat_count = 1;
v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* startNode =
@@ -1004,14 +1004,14 @@ TEST(NativeMethodMonomorphicIC) {
v8::Handle<v8::Value> args[] = {
v8::Integer::New(isolate, warm_up_iterations)
};
- function->Call(env->Global(), ARRAY_SIZE(args), args);
+ function->Call(env->Global(), arraysize(args), args);
callbacks.set_warming_up(false);
}
int32_t repeat_count = 100;
v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
GetChild(isolate, root, "start");
@@ -1105,7 +1105,7 @@ TEST(FunctionCallSample) {
v8::Integer::New(env->GetIsolate(), duration_ms)
};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
@@ -1188,7 +1188,7 @@ TEST(FunctionApplySample) {
};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
@@ -1321,7 +1321,7 @@ static const char* js_native_js_test_source =
static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Handle<v8::Function> function = info[0].As<v8::Function>();
v8::Handle<v8::Value> argv[] = { info[1] };
- function->Call(info.This(), ARRAY_SIZE(argv), argv);
+ function->Call(info.This(), arraysize(argv), argv);
}
diff --git a/deps/v8/test/cctest/test-dataflow.cc b/deps/v8/test/cctest/test-dataflow.cc
index fc1a7fa13f..43d950d860 100644
--- a/deps/v8/test/cctest/test-dataflow.cc
+++ b/deps/v8/test/cctest/test-dataflow.cc
@@ -35,7 +35,6 @@
using namespace v8::internal;
TEST(BitVector) {
- v8::internal::V8::Initialize(NULL);
Zone zone(CcTest::i_isolate());
{
BitVector v(15, &zone);
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index f2187955ad..2f722c2baf 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -132,7 +132,7 @@ TEST(DaylightSavingsTime) {
int local_offset_ms = -36000000; // -10 hours.
DateCacheMock* date_cache =
- new DateCacheMock(local_offset_ms, rules, ARRAY_SIZE(rules));
+ new DateCacheMock(local_offset_ms, rules, arraysize(rules));
reinterpret_cast<Isolate*>(isolate)->set_date_cache(date_cache);
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 5c0b0f392d..2f0674a34d 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -36,7 +36,6 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
-#include "src/stub-cache.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -74,16 +73,23 @@ using ::v8::internal::StrLength;
class DebugLocalContext {
public:
inline DebugLocalContext(
+ v8::Isolate* isolate, v8::ExtensionConfiguration* extensions = 0,
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Handle<v8::ObjectTemplate>(),
+ v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
+ : scope_(isolate),
+ context_(v8::Context::New(isolate, extensions, global_template,
+ global_object)) {
+ context_->Enter();
+ }
+ inline DebugLocalContext(
v8::ExtensionConfiguration* extensions = 0,
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>(),
v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
: scope_(CcTest::isolate()),
- context_(
- v8::Context::New(CcTest::isolate(),
- extensions,
- global_template,
- global_object)) {
+ context_(v8::Context::New(CcTest::isolate(), extensions,
+ global_template, global_object)) {
context_->Enter();
}
inline ~DebugLocalContext() {
@@ -108,7 +114,7 @@ class DebugLocalContext {
Handle<JSGlobalProxy> global(Handle<JSGlobalProxy>::cast(
v8::Utils::OpenHandle(*context_->Global())));
Handle<v8::internal::String> debug_string =
- factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("debug"));
+ factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("debug"));
v8::internal::Runtime::DefineObjectProperty(global, debug_string,
handle(debug_context->global_proxy(), isolate), DONT_ENUM).Check();
}
@@ -138,8 +144,7 @@ static v8::Local<v8::Function> CompileFunction(v8::Isolate* isolate,
const char* source,
const char* function_name) {
v8::Script::Compile(v8::String::NewFromUtf8(isolate, source))->Run();
- v8::Local<v8::Object> global =
- CcTest::isolate()->GetCurrentContext()->Global();
+ v8::Local<v8::Object> global = isolate->GetCurrentContext()->Global();
return v8::Local<v8::Function>::Cast(
global->Get(v8::String::NewFromUtf8(isolate, function_name)));
}
@@ -670,6 +675,8 @@ static void DebugEventBreakPointHitCount(
int exception_hit_count = 0;
int uncaught_exception_hit_count = 0;
int last_js_stack_height = -1;
+v8::Handle<v8::Function> debug_event_listener_callback;
+int debug_event_listener_callback_result;
static void DebugEventCounterClear() {
break_point_hit_count = 0;
@@ -710,9 +717,17 @@ static void DebugEventCounter(
static const int kArgc = 1;
v8::Handle<v8::Value> argv[kArgc] = { exec_state };
// Using exec_state as receiver is just to have a receiver.
- v8::Handle<v8::Value> result = frame_count->Call(exec_state, kArgc, argv);
+ v8::Handle<v8::Value> result = frame_count->Call(exec_state, kArgc, argv);
last_js_stack_height = result->Int32Value();
}
+
+ // Run callback from DebugEventListener and check the result.
+ if (!debug_event_listener_callback.IsEmpty()) {
+ v8::Handle<v8::Value> result =
+ debug_event_listener_callback->Call(event_data, 0, NULL);
+ CHECK(!result.IsEmpty());
+ CHECK_EQ(debug_event_listener_callback_result, result->Int32Value());
+ }
}
@@ -750,6 +765,7 @@ static void DebugEventEvaluate(
CHECK_NE(debug->break_id(), 0);
if (event == v8::Break) {
+ break_point_hit_count++;
for (int i = 0; checks[i].expr != NULL; i++) {
const int argc = 3;
v8::Handle<v8::Value> argv[argc] = {
@@ -2391,7 +2407,7 @@ TEST(DebugEvaluate) {
};
// Simple test function. The "y=0" is in the function foo to provide a break
- // location. For "y=0" the "y" is at position 15 in the barbar function
+ // location. For "y=0" the "y" is at position 15 in the foo function
// therefore setting breakpoint at position 15 will break at "y=0" and
// setting it higher will break after.
v8::Local<v8::Function> foo = CompileFunction(&env,
@@ -2424,6 +2440,34 @@ TEST(DebugEvaluate) {
checks = checks_hh;
foo->Call(env->Global(), 1, argv_foo);
+ // Test that overriding Object.prototype will not interfere into evaluation
+ // on call frame.
+ v8::Local<v8::Function> zoo =
+ CompileFunction(&env,
+ "x = undefined;"
+ "function zoo(t) {"
+ " var a=x;"
+ " Object.prototype.x = 42;"
+ " x=t;"
+ " y=0;" // To ensure break location.
+ " delete Object.prototype.x;"
+ " x=a;"
+ "}",
+ "zoo");
+ const int zoo_break_position = 50;
+
+ // Arguments with one parameter "Hello, world!"
+ v8::Handle<v8::Value> argv_zoo[1] = {
+ v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")};
+
+ // Call zoo with breakpoint set at y=0.
+ DebugEventCounterClear();
+ bp = SetBreakPoint(zoo, zoo_break_position);
+ checks = checks_hu;
+ zoo->Call(env->Global(), 1, argv_zoo);
+ CHECK_EQ(1, break_point_hit_count);
+ ClearBreakPoint(bp);
+
// Test function with an inner function. The "y=0" is in function barbar
// to provide a break location. For "y=0" the "y" is at position 8 in the
// barbar function therefore setting breakpoint at position 8 will break at
@@ -3968,6 +4012,43 @@ TEST(BreakOnException) {
}
+TEST(EvalJSInDebugEventListenerOnNativeReThrownException) {
+ DebugLocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ env.ExposeDebug();
+
+ // Create functions for testing break on exception.
+ v8::Local<v8::Function> noThrowJS = CompileFunction(
+ &env, "function noThrowJS(){var a=[1]; a.push(2); return a.length;}",
+ "noThrowJS");
+
+ debug_event_listener_callback = noThrowJS;
+ debug_event_listener_callback_result = 2;
+
+ v8::V8::AddMessageListener(MessageCallbackCount);
+ v8::Debug::SetDebugEventListener(DebugEventCounter);
+ // Break on uncaught exception
+ ChangeBreakOnException(false, true);
+ DebugEventCounterClear();
+ MessageCallbackCountClear();
+
+ // ReThrow native error
+ {
+ v8::TryCatch tryCatch;
+ env->GetIsolate()->ThrowException(v8::Exception::TypeError(
+ v8::String::NewFromUtf8(env->GetIsolate(), "Type error")));
+ CHECK(tryCatch.HasCaught());
+ tryCatch.ReThrow();
+ }
+ CHECK_EQ(1, exception_hit_count);
+ CHECK_EQ(1, uncaught_exception_hit_count);
+ CHECK_EQ(0, message_callback_count); // FIXME: Should it be 1 ?
+ CHECK(!debug_event_listener_callback.IsEmpty());
+
+ debug_event_listener_callback.Clear();
+}
+
+
// Test break on exception from compiler errors. When compiling using
// v8::Script::Compile there is no JavaScript stack whereas when compiling using
// eval there are JavaScript frames.
@@ -4151,10 +4232,11 @@ TEST(DebugBreak) {
// Set the debug break flag.
v8::Debug::DebugBreak(env->GetIsolate());
+ CHECK(v8::Debug::CheckDebugBreak(env->GetIsolate()));
// Call all functions with different argument count.
break_point_hit_count = 0;
- for (unsigned int i = 0; i < ARRAY_SIZE(argv); i++) {
+ for (unsigned int i = 0; i < arraysize(argv); i++) {
f0->Call(env->Global(), i, argv);
f1->Call(env->Global(), i, argv);
f2->Call(env->Global(), i, argv);
@@ -4162,7 +4244,7 @@ TEST(DebugBreak) {
}
// One break for each function called.
- CHECK_EQ(4 * ARRAY_SIZE(argv), break_point_hit_count);
+ CHECK_EQ(4 * arraysize(argv), break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -4183,6 +4265,12 @@ TEST(DisableBreak) {
const char* src = "function f() {g()};function g(){i=0; while(i<10){i++}}";
v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
+ // Set, test and cancel debug break.
+ v8::Debug::DebugBreak(env->GetIsolate());
+ CHECK(v8::Debug::CheckDebugBreak(env->GetIsolate()));
+ v8::Debug::CancelDebugBreak(env->GetIsolate());
+ CHECK(!v8::Debug::CheckDebugBreak(env->GetIsolate()));
+
// Set the debug break flag.
v8::Debug::DebugBreak(env->GetIsolate());
@@ -4377,10 +4465,6 @@ TEST(InterceptorPropertyMirror) {
"named_values[%d] instanceof debug.PropertyMirror", i);
CHECK(CompileRun(buffer.start())->BooleanValue());
- SNPrintF(buffer, "named_values[%d].propertyType()", i);
- CHECK_EQ(v8::internal::INTERCEPTOR,
- CompileRun(buffer.start())->Int32Value());
-
SNPrintF(buffer, "named_values[%d].isNative()", i);
CHECK(CompileRun(buffer.start())->BooleanValue());
}
@@ -4696,7 +4780,7 @@ TEST(NoHiddenProperties) {
// The Wait() call blocks a thread until it is called for the Nth time, then all
// calls return. Each ThreadBarrier object can only be used once.
template <int N>
-class ThreadBarrier V8_FINAL {
+class ThreadBarrier FINAL {
public:
ThreadBarrier() : num_blocked_(0) {}
@@ -5106,12 +5190,20 @@ class V8Thread : public v8::base::Thread {
public:
V8Thread() : Thread(Options("V8Thread")) {}
void Run();
+ v8::Isolate* isolate() { return isolate_; }
+
+ private:
+ v8::Isolate* isolate_;
};
class DebuggerThread : public v8::base::Thread {
public:
- DebuggerThread() : Thread(Options("DebuggerThread")) {}
+ explicit DebuggerThread(v8::Isolate* isolate)
+ : Thread(Options("DebuggerThread")), isolate_(isolate) {}
void Run();
+
+ private:
+ v8::Isolate* isolate_;
};
@@ -5154,22 +5246,25 @@ void V8Thread::Run() {
"\n"
"foo();\n";
- v8::Isolate* isolate = CcTest::isolate();
- v8::Isolate::Scope isolate_scope(isolate);
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetMessageHandler(&ThreadedMessageHandler);
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(env->GetIsolate());
- global_template->Set(
- v8::String::NewFromUtf8(env->GetIsolate(), "ThreadedAtBarrier1"),
- v8::FunctionTemplate::New(isolate, ThreadedAtBarrier1));
- v8::Handle<v8::Context> context = v8::Context::New(isolate,
- NULL,
- global_template);
- v8::Context::Scope context_scope(context);
-
- CompileRun(source);
+ isolate_ = v8::Isolate::New();
+ threaded_debugging_barriers.barrier_3.Wait();
+ {
+ v8::Isolate::Scope isolate_scope(isolate_);
+ DebugLocalContext env(isolate_);
+ v8::HandleScope scope(isolate_);
+ v8::Debug::SetMessageHandler(&ThreadedMessageHandler);
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(env->GetIsolate());
+ global_template->Set(
+ v8::String::NewFromUtf8(env->GetIsolate(), "ThreadedAtBarrier1"),
+ v8::FunctionTemplate::New(isolate_, ThreadedAtBarrier1));
+ v8::Handle<v8::Context> context =
+ v8::Context::New(isolate_, NULL, global_template);
+ v8::Context::Scope context_scope(context);
+
+ CompileRun(source);
+ }
+ isolate_->Dispose();
}
@@ -5185,21 +5280,21 @@ void DebuggerThread::Run() {
"\"type\":\"request\","
"\"command\":\"continue\"}";
- v8::Isolate* isolate = CcTest::isolate();
threaded_debugging_barriers.barrier_1.Wait();
- v8::Debug::DebugBreak(isolate);
+ v8::Debug::DebugBreak(isolate_);
threaded_debugging_barriers.barrier_2.Wait();
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_1, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_2, buffer));
}
TEST(ThreadedDebugging) {
- DebuggerThread debugger_thread;
V8Thread v8_thread;
// Create a V8 environment
v8_thread.Start();
+ threaded_debugging_barriers.barrier_3.Wait();
+ DebuggerThread debugger_thread(v8_thread.isolate());
debugger_thread.Start();
v8_thread.Join();
@@ -5218,17 +5313,24 @@ class BreakpointsV8Thread : public v8::base::Thread {
public:
BreakpointsV8Thread() : Thread(Options("BreakpointsV8Thread")) {}
void Run();
+
+ v8::Isolate* isolate() { return isolate_; }
+
+ private:
+ v8::Isolate* isolate_;
};
class BreakpointsDebuggerThread : public v8::base::Thread {
public:
- explicit BreakpointsDebuggerThread(bool global_evaluate)
+ BreakpointsDebuggerThread(bool global_evaluate, v8::Isolate* isolate)
: Thread(Options("BreakpointsDebuggerThread")),
- global_evaluate_(global_evaluate) {}
+ global_evaluate_(global_evaluate),
+ isolate_(isolate) {}
void Run();
private:
bool global_evaluate_;
+ v8::Isolate* isolate_;
};
@@ -5273,16 +5375,20 @@ void BreakpointsV8Thread::Run() {
const char* source_2 = "cat(17);\n"
"cat(19);\n";
- v8::Isolate* isolate = CcTest::isolate();
- v8::Isolate::Scope isolate_scope(isolate);
- DebugLocalContext env;
- v8::HandleScope scope(isolate);
- v8::Debug::SetMessageHandler(&BreakpointsMessageHandler);
-
- CompileRun(source_1);
- breakpoints_barriers->barrier_1.Wait();
- breakpoints_barriers->barrier_2.Wait();
- CompileRun(source_2);
+ isolate_ = v8::Isolate::New();
+ breakpoints_barriers->barrier_3.Wait();
+ {
+ v8::Isolate::Scope isolate_scope(isolate_);
+ DebugLocalContext env(isolate_);
+ v8::HandleScope scope(isolate_);
+ v8::Debug::SetMessageHandler(&BreakpointsMessageHandler);
+
+ CompileRun(source_1);
+ breakpoints_barriers->barrier_1.Wait();
+ breakpoints_barriers->barrier_2.Wait();
+ CompileRun(source_2);
+ }
+ isolate_->Dispose();
}
@@ -5348,14 +5454,12 @@ void BreakpointsDebuggerThread::Run() {
"\"command\":\"continue\"}";
- v8::Isolate* isolate = CcTest::isolate();
- v8::Isolate::Scope isolate_scope(isolate);
// v8 thread initializes, runs source_1
breakpoints_barriers->barrier_1.Wait();
// 1:Set breakpoint in cat() (will get id 1).
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_1, buffer));
// 2:Set breakpoint in dog() (will get id 2).
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_2, buffer));
breakpoints_barriers->barrier_2.Wait();
// V8 thread starts compiling source_2.
// Automatic break happens, to run queued commands
@@ -5367,43 +5471,42 @@ void BreakpointsDebuggerThread::Run() {
// Must have hit breakpoint #1.
CHECK_EQ(1, break_event_breakpoint_id);
// 4:Evaluate dog() (which has a breakpoint).
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_3, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_3, buffer));
// V8 thread hits breakpoint in dog().
breakpoints_barriers->semaphore_1.Wait(); // wait for break event
// Must have hit breakpoint #2.
CHECK_EQ(2, break_event_breakpoint_id);
// 5:Evaluate (x + 1).
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_4, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_4, buffer));
// Evaluate (x + 1) finishes.
breakpoints_barriers->semaphore_1.Wait();
// Must have result 108.
CHECK_EQ(108, evaluate_int_result);
// 6:Continue evaluation of dog().
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_5, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_5, buffer));
// Evaluate dog() finishes.
breakpoints_barriers->semaphore_1.Wait();
// Must have result 107.
CHECK_EQ(107, evaluate_int_result);
// 7:Continue evaluation of source_2, finish cat(17), hit breakpoint
// in cat(19).
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_6, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_6, buffer));
// Message callback gets break event.
breakpoints_barriers->semaphore_1.Wait(); // wait for break event
// Must have hit breakpoint #1.
CHECK_EQ(1, break_event_breakpoint_id);
// 8: Evaluate dog() with breaks disabled.
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_7, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_7, buffer));
// Evaluate dog() finishes.
breakpoints_barriers->semaphore_1.Wait();
// Must have result 116.
CHECK_EQ(116, evaluate_int_result);
// 9: Continue evaluation of source2, reach end.
- v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_8, buffer));
+ v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_8, buffer));
}
void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
- BreakpointsDebuggerThread breakpoints_debugger_thread(global_evaluate);
BreakpointsV8Thread breakpoints_v8_thread;
// Create a V8 environment
@@ -5411,6 +5514,9 @@ void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
breakpoints_barriers = &stack_allocated_breakpoints_barriers;
breakpoints_v8_thread.Start();
+ breakpoints_barriers->barrier_3.Wait();
+ BreakpointsDebuggerThread breakpoints_debugger_thread(
+ global_evaluate, breakpoints_v8_thread.isolate());
breakpoints_debugger_thread.Start();
breakpoints_v8_thread.Join();
@@ -6522,6 +6628,10 @@ TEST(ProcessDebugMessages) {
}
+class SendCommandThread;
+static SendCommandThread* send_command_thread_ = NULL;
+
+
class SendCommandThread : public v8::base::Thread {
public:
explicit SendCommandThread(v8::Isolate* isolate)
@@ -6529,9 +6639,12 @@ class SendCommandThread : public v8::base::Thread {
semaphore_(0),
isolate_(isolate) {}
- static void ProcessDebugMessages(v8::Isolate* isolate, void* data) {
- v8::Debug::ProcessDebugMessages();
- reinterpret_cast<v8::base::Semaphore*>(data)->Signal();
+ static void CountingAndSignallingMessageHandler(
+ const v8::Debug::Message& message) {
+ if (message.IsResponse()) {
+ counting_message_handler_counter++;
+ send_command_thread_->semaphore_.Signal();
+ }
}
virtual void Run() {
@@ -6545,21 +6658,22 @@ class SendCommandThread : public v8::base::Thread {
int length = AsciiToUtf16(scripts_command, buffer);
// Send scripts command.
- for (int i = 0; i < 100; i++) {
+ for (int i = 0; i < 20; i++) {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
CHECK_EQ(i, counting_message_handler_counter);
// Queue debug message.
v8::Debug::SendCommand(isolate_, buffer, length);
- // Synchronize with the main thread to force message processing.
- isolate_->RequestInterrupt(ProcessDebugMessages, &semaphore_);
+ // Wait for the message handler to pick up the response.
semaphore_.Wait();
+ i::PrintF("iteration %d took %f ms\n", i,
+ timer.Elapsed().InMillisecondsF());
}
v8::V8::TerminateExecution(isolate_);
}
- void StartSending() {
- semaphore_.Signal();
- }
+ void StartSending() { semaphore_.Signal(); }
private:
v8::base::Semaphore semaphore_;
@@ -6567,8 +6681,6 @@ class SendCommandThread : public v8::base::Thread {
};
-static SendCommandThread* send_command_thread_ = NULL;
-
static void StartSendingCommands(
const v8::FunctionCallbackInfo<v8::Value>& info) {
send_command_thread_->StartSending();
@@ -6582,7 +6694,8 @@ TEST(ProcessDebugMessagesThreaded) {
counting_message_handler_counter = 0;
- v8::Debug::SetMessageHandler(CountingMessageHandler);
+ v8::Debug::SetMessageHandler(
+ SendCommandThread::CountingAndSignallingMessageHandler);
send_command_thread_ = new SendCommandThread(isolate);
send_command_thread_->Start();
@@ -6592,7 +6705,7 @@ TEST(ProcessDebugMessagesThreaded) {
CompileRun("start(); while (true) { }");
- CHECK_EQ(100, counting_message_handler_counter);
+ CHECK_EQ(20, counting_message_handler_counter);
v8::Debug::SetMessageHandler(NULL);
CheckDebuggerUnloaded();
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 3127acc6a6..a201ccd7e4 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -35,7 +35,6 @@
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/isolate.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using ::v8::base::OS;
@@ -99,8 +98,8 @@ class AllowNativesSyntaxNoInlining {
// Abort any ongoing incremental marking to make sure that all weak global
// handle callbacks are processed.
-static void NonIncrementalGC() {
- CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+static void NonIncrementalGC(i::Isolate* isolate) {
+ isolate->heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
@@ -128,7 +127,7 @@ TEST(DeoptimizeSimple) {
"function f() { g(); };"
"f();");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -144,7 +143,7 @@ TEST(DeoptimizeSimple) {
"function f(x) { if (x) { g(); } else { return } };"
"f(true);");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -168,7 +167,7 @@ TEST(DeoptimizeSimpleWithArguments) {
"function f(x, y, z) { g(1,x); y+z; };"
"f(1, \"2\", false);");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -185,7 +184,7 @@ TEST(DeoptimizeSimpleWithArguments) {
"function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
"f(true, 1, \"2\");");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -210,7 +209,7 @@ TEST(DeoptimizeSimpleNested) {
"function g(z) { count++; %DeoptimizeFunction(f); return z;}"
"function f(x,y,z) { return h(x, y, g(z)); };"
"result = f(1, 2, 3);");
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -236,7 +235,7 @@ TEST(DeoptimizeRecursive) {
"function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
"f(10);");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
@@ -269,7 +268,7 @@ TEST(DeoptimizeMultiple) {
"function f1(x) { return f2(x + 1, x + 1) + x; };"
"result = f1(1);");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -291,7 +290,7 @@ TEST(DeoptimizeConstructor) {
"function f() { g(); };"
"result = new f() instanceof f;");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
@@ -308,7 +307,7 @@ TEST(DeoptimizeConstructor) {
"result = new f(1, 2);"
"result = result.x + result.y;");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -338,7 +337,7 @@ TEST(DeoptimizeConstructorMultiple) {
"function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
"result = new f1(1).result;");
}
- NonIncrementalGC();
+ NonIncrementalGC(CcTest::i_isolate());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -346,51 +345,61 @@ TEST(DeoptimizeConstructorMultiple) {
}
-TEST(DeoptimizeBinaryOperationADDString) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
AllowNativesSyntaxNoInlining options;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
-
- const char* f_source = "function f(x, y) { return x + y; };";
-
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
{
- // Compile function f and collect to type feedback to insert binary op stub
- // call in the optimized code.
- i::FLAG_prepare_always_opt = true;
- CompileRun("var count = 0;"
- "var result = 0;"
- "var deopt = false;"
- "function X() { };"
- "X.prototype.toString = function () {"
- " if (deopt) { count++; %DeoptimizeFunction(f); } return 'an X'"
- "};");
- CompileRun(f_source);
- CompileRun("for (var i = 0; i < 5; i++) {"
- " f('a+', new X());"
- "};");
-
- // Compile an optimized version of f.
- i::FLAG_always_opt = true;
- CompileRun(f_source);
- CompileRun("f('a+', new X());");
- CHECK(!CcTest::i_isolate()->use_crankshaft() ||
- GetJSFunction(env->Global(), "f")->IsOptimized());
-
- // Call f and force deoptimization while processing the binary operation.
- CompileRun("deopt = true;"
- "var result = f('a+', new X());");
- }
- NonIncrementalGC();
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
+
+ const char* f_source = "function f(x, y) { return x + y; };";
+
+ {
+ // Compile function f and collect to type feedback to insert binary op
+ // stub call in the optimized code.
+ i::FLAG_prepare_always_opt = true;
+ CompileRun(
+ "var count = 0;"
+ "var result = 0;"
+ "var deopt = false;"
+ "function X() { };"
+ "X.prototype.toString = function () {"
+ " if (deopt) { count++; %DeoptimizeFunction(f); } return 'an X'"
+ "};");
+ CompileRun(f_source);
+ CompileRun(
+ "for (var i = 0; i < 5; i++) {"
+ " f('a+', new X());"
+ "};");
+
+ // Compile an optimized version of f.
+ i::FLAG_always_opt = true;
+ CompileRun(f_source);
+ CompileRun("f('a+', new X());");
+ CHECK(!i_isolate->use_crankshaft() ||
+ GetJSFunction(env->Global(), "f")->IsOptimized());
+
+ // Call f and force deoptimization while processing the binary operation.
+ CompileRun(
+ "deopt = true;"
+ "var result = f('a+', new X());");
+ }
+ NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
- CHECK(result->IsString());
- v8::String::Utf8Value utf8(result);
- CHECK_EQ("a+an X", *utf8);
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("a+an X", *utf8);
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
@@ -407,6 +416,7 @@ static void CompileConstructorWithDeoptimizingValueOf() {
static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
const char* binary_op) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>((*env)->GetIsolate());
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> f_source_buffer;
SNPrintF(f_source_buffer,
"function f(x, y) { return x %s y; };",
@@ -427,290 +437,355 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f(7, new X());");
- CHECK(!CcTest::i_isolate()->use_crankshaft() ||
+ CHECK(!i_isolate->use_crankshaft() ||
GetJSFunction((*env)->Global(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
CompileRun("deopt = true;"
"var result = f(7, new X());");
- NonIncrementalGC();
+ NonIncrementalGC(i_isolate);
CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
}
-TEST(DeoptimizeBinaryOperationADD) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationADD) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
+ {
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
- TestDeoptimizeBinaryOpHelper(&env, "+");
+ TestDeoptimizeBinaryOpHelper(&env, "+");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeBinaryOperationSUB) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationSUB) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
+ {
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
- TestDeoptimizeBinaryOpHelper(&env, "-");
+ TestDeoptimizeBinaryOpHelper(&env, "-");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeBinaryOperationMUL) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationMUL) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
+ {
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
- TestDeoptimizeBinaryOpHelper(&env, "*");
+ TestDeoptimizeBinaryOpHelper(&env, "*");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeBinaryOperationDIV) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationDIV) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
+ {
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
- TestDeoptimizeBinaryOpHelper(&env, "/");
+ TestDeoptimizeBinaryOpHelper(&env, "/");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeBinaryOperationMOD) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationMOD) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
+ {
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
- TestDeoptimizeBinaryOpHelper(&env, "%");
+ TestDeoptimizeBinaryOpHelper(&env, "%");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeCompare) {
+UNINITIALIZED_TEST(DeoptimizeCompare) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
-
- const char* f_source = "function f(x, y) { return x < y; };";
-
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
{
- AllowNativesSyntaxNoInlining options;
- // Compile function f and collect to type feedback to insert compare ic
- // call in the optimized code.
- i::FLAG_prepare_always_opt = true;
- CompileRun("var count = 0;"
- "var result = 0;"
- "var deopt = false;"
- "function X() { };"
- "X.prototype.toString = function () {"
- " if (deopt) { count++; %DeoptimizeFunction(f); } return 'b'"
- "};");
- CompileRun(f_source);
- CompileRun("for (var i = 0; i < 5; i++) {"
- " f('a', new X());"
- "};");
-
- // Compile an optimized version of f.
- i::FLAG_always_opt = true;
- CompileRun(f_source);
- CompileRun("f('a', new X());");
- CHECK(!CcTest::i_isolate()->use_crankshaft() ||
- GetJSFunction(env->Global(), "f")->IsOptimized());
-
- // Call f and force deoptimization while processing the comparison.
- CompileRun("deopt = true;"
- "var result = f('a', new X());");
- }
- NonIncrementalGC();
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
+
+ const char* f_source = "function f(x, y) { return x < y; };";
+
+ {
+ AllowNativesSyntaxNoInlining options;
+ // Compile function f and collect to type feedback to insert compare ic
+ // call in the optimized code.
+ i::FLAG_prepare_always_opt = true;
+ CompileRun(
+ "var count = 0;"
+ "var result = 0;"
+ "var deopt = false;"
+ "function X() { };"
+ "X.prototype.toString = function () {"
+ " if (deopt) { count++; %DeoptimizeFunction(f); } return 'b'"
+ "};");
+ CompileRun(f_source);
+ CompileRun(
+ "for (var i = 0; i < 5; i++) {"
+ " f('a', new X());"
+ "};");
+
+ // Compile an optimized version of f.
+ i::FLAG_always_opt = true;
+ CompileRun(f_source);
+ CompileRun("f('a', new X());");
+ CHECK(!i_isolate->use_crankshaft() ||
+ GetJSFunction(env->Global(), "f")->IsOptimized());
+
+ // Call f and force deoptimization while processing the comparison.
+ CompileRun(
+ "deopt = true;"
+ "var result = f('a', new X());");
+ }
+ NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+ CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeLoadICStoreIC) {
+UNINITIALIZED_TEST(DeoptimizeLoadICStoreIC) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
-
- // Functions to generate load/store/keyed load/keyed store IC calls.
- const char* f1_source = "function f1(x) { return x.y; };";
- const char* g1_source = "function g1(x) { x.y = 1; };";
- const char* f2_source = "function f2(x, y) { return x[y]; };";
- const char* g2_source = "function g2(x, y) { x[y] = 1; };";
-
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
{
- AllowNativesSyntaxNoInlining options;
- // Compile functions and collect to type feedback to insert ic
- // calls in the optimized code.
- i::FLAG_prepare_always_opt = true;
- CompileRun("var count = 0;"
- "var result = 0;"
- "var deopt = false;"
- "function X() { };"
- "X.prototype.__defineGetter__('y', function () {"
- " if (deopt) { count++; %DeoptimizeFunction(f1); };"
- " return 13;"
- "});"
- "X.prototype.__defineSetter__('y', function () {"
- " if (deopt) { count++; %DeoptimizeFunction(g1); };"
- "});"
- "X.prototype.__defineGetter__('z', function () {"
- " if (deopt) { count++; %DeoptimizeFunction(f2); };"
- " return 13;"
- "});"
- "X.prototype.__defineSetter__('z', function () {"
- " if (deopt) { count++; %DeoptimizeFunction(g2); };"
- "});");
- CompileRun(f1_source);
- CompileRun(g1_source);
- CompileRun(f2_source);
- CompileRun(g2_source);
- CompileRun("for (var i = 0; i < 5; i++) {"
- " f1(new X());"
- " g1(new X());"
- " f2(new X(), 'z');"
- " g2(new X(), 'z');"
- "};");
-
- // Compile an optimized version of the functions.
- i::FLAG_always_opt = true;
- CompileRun(f1_source);
- CompileRun(g1_source);
- CompileRun(f2_source);
- CompileRun(g2_source);
- CompileRun("f1(new X());");
- CompileRun("g1(new X());");
- CompileRun("f2(new X(), 'z');");
- CompileRun("g2(new X(), 'z');");
- if (CcTest::i_isolate()->use_crankshaft()) {
- CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
+
+ // Functions to generate load/store/keyed load/keyed store IC calls.
+ const char* f1_source = "function f1(x) { return x.y; };";
+ const char* g1_source = "function g1(x) { x.y = 1; };";
+ const char* f2_source = "function f2(x, y) { return x[y]; };";
+ const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+ {
+ AllowNativesSyntaxNoInlining options;
+ // Compile functions and collect to type feedback to insert ic
+ // calls in the optimized code.
+ i::FLAG_prepare_always_opt = true;
+ CompileRun(
+ "var count = 0;"
+ "var result = 0;"
+ "var deopt = false;"
+ "function X() { };"
+ "X.prototype.__defineGetter__('y', function () {"
+ " if (deopt) { count++; %DeoptimizeFunction(f1); };"
+ " return 13;"
+ "});"
+ "X.prototype.__defineSetter__('y', function () {"
+ " if (deopt) { count++; %DeoptimizeFunction(g1); };"
+ "});"
+ "X.prototype.__defineGetter__('z', function () {"
+ " if (deopt) { count++; %DeoptimizeFunction(f2); };"
+ " return 13;"
+ "});"
+ "X.prototype.__defineSetter__('z', function () {"
+ " if (deopt) { count++; %DeoptimizeFunction(g2); };"
+ "});");
+ CompileRun(f1_source);
+ CompileRun(g1_source);
+ CompileRun(f2_source);
+ CompileRun(g2_source);
+ CompileRun(
+ "for (var i = 0; i < 5; i++) {"
+ " f1(new X());"
+ " g1(new X());"
+ " f2(new X(), 'z');"
+ " g2(new X(), 'z');"
+ "};");
+
+ // Compile an optimized version of the functions.
+ i::FLAG_always_opt = true;
+ CompileRun(f1_source);
+ CompileRun(g1_source);
+ CompileRun(f2_source);
+ CompileRun(g2_source);
+ CompileRun("f1(new X());");
+ CompileRun("g1(new X());");
+ CompileRun("f2(new X(), 'z');");
+ CompileRun("g2(new X(), 'z');");
+ if (i_isolate->use_crankshaft()) {
+ CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+ CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+ CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+ CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+ }
+
+ // Call functions and force deoptimization while processing the ics.
+ CompileRun(
+ "deopt = true;"
+ "var result = f1(new X());"
+ "g1(new X());"
+ "f2(new X(), 'z');"
+ "g2(new X(), 'z');");
}
-
- // Call functions and force deoptimization while processing the ics.
- CompileRun("deopt = true;"
- "var result = f1(new X());"
- "g1(new X());"
- "f2(new X(), 'z');"
- "g2(new X(), 'z');");
+ NonIncrementalGC(i_isolate);
+
+ CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+ CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+ CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+ CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+ CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
}
- NonIncrementalGC();
-
- CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
- CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+ isolate->Exit();
+ isolate->Dispose();
}
-TEST(DeoptimizeLoadICStoreICNested) {
+UNINITIALIZED_TEST(DeoptimizeLoadICStoreICNested) {
i::FLAG_turbo_deoptimization = true;
i::FLAG_concurrent_recompilation = false;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
-
- // Functions to generate load/store/keyed load/keyed store IC calls.
- const char* f1_source = "function f1(x) { return x.y; };";
- const char* g1_source = "function g1(x) { x.y = 1; };";
- const char* f2_source = "function f2(x, y) { return x[y]; };";
- const char* g2_source = "function g2(x, y) { x[y] = 1; };";
-
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
{
- AllowNativesSyntaxNoInlining options;
- // Compile functions and collect to type feedback to insert ic
- // calls in the optimized code.
- i::FLAG_prepare_always_opt = true;
- CompileRun("var count = 0;"
- "var result = 0;"
- "var deopt = false;"
- "function X() { };"
- "X.prototype.__defineGetter__('y', function () {"
- " g1(this);"
- " return 13;"
- "});"
- "X.prototype.__defineSetter__('y', function () {"
- " f2(this, 'z');"
- "});"
- "X.prototype.__defineGetter__('z', function () {"
- " g2(this, 'z');"
- "});"
- "X.prototype.__defineSetter__('z', function () {"
- " if (deopt) {"
- " count++;"
- " %DeoptimizeFunction(f1);"
- " %DeoptimizeFunction(g1);"
- " %DeoptimizeFunction(f2);"
- " %DeoptimizeFunction(g2); };"
- "});");
- CompileRun(f1_source);
- CompileRun(g1_source);
- CompileRun(f2_source);
- CompileRun(g2_source);
- CompileRun("for (var i = 0; i < 5; i++) {"
- " f1(new X());"
- " g1(new X());"
- " f2(new X(), 'z');"
- " g2(new X(), 'z');"
- "};");
-
- // Compile an optimized version of the functions.
- i::FLAG_always_opt = true;
- CompileRun(f1_source);
- CompileRun(g1_source);
- CompileRun(f2_source);
- CompileRun(g2_source);
- CompileRun("f1(new X());");
- CompileRun("g1(new X());");
- CompileRun("f2(new X(), 'z');");
- CompileRun("g2(new X(), 'z');");
- if (CcTest::i_isolate()->use_crankshaft()) {
- CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+ LocalContext env(isolate);
+ v8::HandleScope scope(env->GetIsolate());
+
+ // Functions to generate load/store/keyed load/keyed store IC calls.
+ const char* f1_source = "function f1(x) { return x.y; };";
+ const char* g1_source = "function g1(x) { x.y = 1; };";
+ const char* f2_source = "function f2(x, y) { return x[y]; };";
+ const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+ {
+ AllowNativesSyntaxNoInlining options;
+ // Compile functions and collect to type feedback to insert ic
+ // calls in the optimized code.
+ i::FLAG_prepare_always_opt = true;
+ CompileRun(
+ "var count = 0;"
+ "var result = 0;"
+ "var deopt = false;"
+ "function X() { };"
+ "X.prototype.__defineGetter__('y', function () {"
+ " g1(this);"
+ " return 13;"
+ "});"
+ "X.prototype.__defineSetter__('y', function () {"
+ " f2(this, 'z');"
+ "});"
+ "X.prototype.__defineGetter__('z', function () {"
+ " g2(this, 'z');"
+ "});"
+ "X.prototype.__defineSetter__('z', function () {"
+ " if (deopt) {"
+ " count++;"
+ " %DeoptimizeFunction(f1);"
+ " %DeoptimizeFunction(g1);"
+ " %DeoptimizeFunction(f2);"
+ " %DeoptimizeFunction(g2); };"
+ "});");
+ CompileRun(f1_source);
+ CompileRun(g1_source);
+ CompileRun(f2_source);
+ CompileRun(g2_source);
+ CompileRun(
+ "for (var i = 0; i < 5; i++) {"
+ " f1(new X());"
+ " g1(new X());"
+ " f2(new X(), 'z');"
+ " g2(new X(), 'z');"
+ "};");
+
+ // Compile an optimized version of the functions.
+ i::FLAG_always_opt = true;
+ CompileRun(f1_source);
+ CompileRun(g1_source);
+ CompileRun(f2_source);
+ CompileRun(g2_source);
+ CompileRun("f1(new X());");
+ CompileRun("g1(new X());");
+ CompileRun("f2(new X(), 'z');");
+ CompileRun("g2(new X(), 'z');");
+ if (i_isolate->use_crankshaft()) {
+ CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+ CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+ CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+ CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+ }
+
+ // Call functions and force deoptimization while processing the ics.
+ CompileRun(
+ "deopt = true;"
+ "var result = f1(new X());");
}
+ NonIncrementalGC(i_isolate);
- // Call functions and force deoptimization while processing the ics.
- CompileRun("deopt = true;"
- "var result = f1(new X());");
+ CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+ CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+ CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+ CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+ CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+ CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
}
- NonIncrementalGC();
-
- CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+ isolate->Exit();
+ isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 9a1914237f..14e5d69d43 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -99,8 +99,8 @@ static void TestHashMap(Handle<HashMap> table) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK_EQ(table->Lookup(key), CcTest::heap()->the_hole_value());
- CHECK_EQ(key->GetIdentityHash(),
- CcTest::heap()->undefined_value());
+ Object* identity_hash = key->GetIdentityHash();
+ CHECK_EQ(identity_hash, CcTest::heap()->undefined_value());
}
}
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 8436df7c5a..49088f6e94 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -32,9 +32,9 @@
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -416,6 +416,7 @@ TEST(DisasmIa320) {
__ addsd(xmm1, xmm0);
__ mulsd(xmm1, xmm0);
__ subsd(xmm1, xmm0);
+ __ subsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ divsd(xmm1, xmm0);
__ ucomisd(xmm0, xmm1);
__ cmpltsd(xmm0, xmm1);
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index cfd861e241..131f41384c 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -110,41 +110,127 @@ TEST(Type0) {
COMPARE(subu(v0, v1, s0),
"00701023 subu v0, v1, s0");
- COMPARE(mult(a0, a1),
- "00850018 mult a0, a1");
- COMPARE(mult(t2, t3),
- "014b0018 mult t2, t3");
- COMPARE(mult(v0, v1),
- "00430018 mult v0, v1");
-
- COMPARE(multu(a0, a1),
- "00850019 multu a0, a1");
- COMPARE(multu(t2, t3),
- "014b0019 multu t2, t3");
- COMPARE(multu(v0, v1),
- "00430019 multu v0, v1");
-
- COMPARE(div(a0, a1),
- "0085001a div a0, a1");
- COMPARE(div(t2, t3),
- "014b001a div t2, t3");
- COMPARE(div(v0, v1),
- "0043001a div v0, v1");
-
- COMPARE(divu(a0, a1),
- "0085001b divu a0, a1");
- COMPARE(divu(t2, t3),
- "014b001b divu t2, t3");
- COMPARE(divu(v0, v1),
- "0043001b divu v0, v1");
-
- if (kArchVariant != kLoongson) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ COMPARE(mult(a0, a1),
+ "00850018 mult a0, a1");
+ COMPARE(mult(t2, t3),
+ "014b0018 mult t2, t3");
+ COMPARE(mult(v0, v1),
+ "00430018 mult v0, v1");
+
+ COMPARE(multu(a0, a1),
+ "00850019 multu a0, a1");
+ COMPARE(multu(t2, t3),
+ "014b0019 multu t2, t3");
+ COMPARE(multu(v0, v1),
+ "00430019 multu v0, v1");
+
+ COMPARE(div(a0, a1),
+ "0085001a div a0, a1");
+ COMPARE(div(t2, t3),
+ "014b001a div t2, t3");
+ COMPARE(div(v0, v1),
+ "0043001a div v0, v1");
+
+ COMPARE(divu(a0, a1),
+ "0085001b divu a0, a1");
+ COMPARE(divu(t2, t3),
+ "014b001b divu t2, t3");
+ COMPARE(divu(v0, v1),
+ "0043001b divu v0, v1");
+
+ if (!IsMipsArchVariant(kLoongson)) {
+ COMPARE(mul(a0, a1, a2),
+ "70a62002 mul a0, a1, a2");
+ COMPARE(mul(t2, t3, t4),
+ "716c5002 mul t2, t3, t4");
+ COMPARE(mul(v0, v1, s0),
+ "70701002 mul v0, v1, s0");
+ }
+ } else { // MIPS32r6.
COMPARE(mul(a0, a1, a2),
- "70a62002 mul a0, a1, a2");
- COMPARE(mul(t2, t3, t4),
- "716c5002 mul t2, t3, t4");
- COMPARE(mul(v0, v1, s0),
- "70701002 mul v0, v1, s0");
+ "00a62098 mul a0, a1, a2");
+ COMPARE(muh(a0, a1, a2),
+ "00a620d8 muh a0, a1, a2");
+ COMPARE(mul(t1, t2, t3),
+ "014b4898 mul t1, t2, t3");
+ COMPARE(muh(t1, t2, t3),
+ "014b48d8 muh t1, t2, t3");
+ COMPARE(mul(v0, v1, a0),
+ "00641098 mul v0, v1, a0");
+ COMPARE(muh(v0, v1, a0),
+ "006410d8 muh v0, v1, a0");
+
+ COMPARE(mulu(a0, a1, a2),
+ "00a62099 mulu a0, a1, a2");
+ COMPARE(muhu(a0, a1, a2),
+ "00a620d9 muhu a0, a1, a2");
+ COMPARE(mulu(t1, t2, t3),
+ "014b4899 mulu t1, t2, t3");
+ COMPARE(muhu(t1, t2, t3),
+ "014b48d9 muhu t1, t2, t3");
+ COMPARE(mulu(v0, v1, a0),
+ "00641099 mulu v0, v1, a0");
+ COMPARE(muhu(v0, v1, a0),
+ "006410d9 muhu v0, v1, a0");
+
+ COMPARE(div(a0, a1, a2),
+ "00a6209a div a0, a1, a2");
+ COMPARE(mod(a0, a1, a2),
+ "00a620da mod a0, a1, a2");
+ COMPARE(div(t1, t2, t3),
+ "014b489a div t1, t2, t3");
+ COMPARE(mod(t1, t2, t3),
+ "014b48da mod t1, t2, t3");
+ COMPARE(div(v0, v1, a0),
+ "0064109a div v0, v1, a0");
+ COMPARE(mod(v0, v1, a0),
+ "006410da mod v0, v1, a0");
+
+ COMPARE(divu(a0, a1, a2),
+ "00a6209b divu a0, a1, a2");
+ COMPARE(modu(a0, a1, a2),
+ "00a620db modu a0, a1, a2");
+ COMPARE(divu(t1, t2, t3),
+ "014b489b divu t1, t2, t3");
+ COMPARE(modu(t1, t2, t3),
+ "014b48db modu t1, t2, t3");
+ COMPARE(divu(v0, v1, a0),
+ "0064109b divu v0, v1, a0");
+ COMPARE(modu(v0, v1, a0),
+ "006410db modu v0, v1, a0");
+
+ COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
+ "20840000 bovc a0, a0, 0");
+ COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
+ "20a40000 bovc a1, a0, 0");
+ COMPARE(bovc(a1, a0, 32767),
+ "20a47fff bovc a1, a0, 32767");
+ COMPARE(bovc(a1, a0, -32768),
+ "20a48000 bovc a1, a0, -32768");
+
+ COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
+ "60840000 bnvc a0, a0, 0");
+ COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
+ "60a40000 bnvc a1, a0, 0");
+ COMPARE(bnvc(a1, a0, 32767),
+ "60a47fff bnvc a1, a0, 32767");
+ COMPARE(bnvc(a1, a0, -32768),
+ "60a48000 bnvc a1, a0, -32768");
+
+ COMPARE(beqzc(a0, 0),
+ "d8800000 beqzc a0, 0x0");
+ COMPARE(beqzc(a0, 0xfffff), // 0x0fffff == 1048575.
+ "d88fffff beqzc a0, 0xfffff");
+ COMPARE(beqzc(a0, 0x100000), // 0x100000 == -1048576.
+ "d8900000 beqzc a0, 0x100000");
+
+ COMPARE(bnezc(a0, 0),
+ "f8800000 bnezc a0, 0x0");
+ COMPARE(bnezc(a0, 0xfffff), // 0x0fffff == 1048575.
+ "f88fffff bnezc a0, 0xfffff");
+ COMPARE(bnezc(a0, 0x100000), // 0x100000 == -1048576.
+ "f8900000 bnezc a0, 0x100000");
}
COMPARE(addiu(a0, a1, 0x0),
@@ -266,7 +352,7 @@ TEST(Type0) {
COMPARE(srav(v0, v1, fp),
"03c31007 srav v0, v1, fp");
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
COMPARE(rotr(a0, a1, 0),
"00252002 rotr a0, a1, 0");
COMPARE(rotr(s0, s1, 8),
@@ -369,7 +455,7 @@ TEST(Type0) {
COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1");
- if (kArchVariant != kLoongson) {
+ if (!IsMipsArchVariant(kLoongson)) {
COMPARE(movz(a0, a1, a2),
"00a6200a movz a0, a1, a2");
COMPARE(movz(s0, s1, s2),
@@ -404,15 +490,24 @@ TEST(Type0) {
COMPARE(movf(v0, v1, 6),
"00781001 movf v0, v1, 6");
- COMPARE(clz(a0, a1),
- "70a42020 clz a0, a1");
- COMPARE(clz(s6, s7),
- "72f6b020 clz s6, s7");
- COMPARE(clz(v0, v1),
- "70621020 clz v0, v1");
+ if (IsMipsArchVariant(kMips32r6)) {
+ COMPARE(clz(a0, a1),
+ "00a02050 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "02e0b050 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "00601050 clz v0, v1");
+ } else {
+ COMPARE(clz(a0, a1),
+ "70a42020 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "72f6b020 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "70621020 clz v0, v1");
+ }
}
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 4778b04bb7..d238410fa4 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -32,9 +32,9 @@
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -167,6 +167,7 @@ TEST(DisasmX64) {
__ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
__ imulq(rdx, rcx, Immediate(12));
__ imulq(rdx, rcx, Immediate(1000));
+ __ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(1000));
__ incq(rdx);
__ incq(Operand(rbx, rcx, times_4, 10000));
@@ -378,6 +379,7 @@ TEST(DisasmX64) {
__ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
__ cvttsd2si(rdx, xmm1);
__ cvttsd2siq(rdx, xmm1);
+ __ cvttsd2siq(rdx, Operand(rbx, rcx, times_4, 10000));
__ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
// 128 bit move instructions.
diff --git a/deps/v8/test/cctest/test-disasm-x87.cc b/deps/v8/test/cctest/test-disasm-x87.cc
index 1515cc793b..6cd33e5574 100644
--- a/deps/v8/test/cctest/test-disasm-x87.cc
+++ b/deps/v8/test/cctest/test-disasm-x87.cc
@@ -32,9 +32,9 @@
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -349,6 +349,7 @@ TEST(DisasmIa320) {
__ fprem1();
__ fincstp();
__ ftst();
+ __ fxam();
__ fxch(3);
__ fld_s(Operand(ebx, ecx, times_4, 10000));
__ fstp_s(Operand(ebx, ecx, times_4, 10000));
@@ -378,6 +379,12 @@ TEST(DisasmIa320) {
__ fninit();
__ nop();
+ __ fldcw(Operand(ebx, ecx, times_4, 10000));
+ __ fnstcw(Operand(ebx, ecx, times_4, 10000));
+ __ fadd_d(Operand(ebx, ecx, times_4, 10000));
+ __ fnsave(Operand(ebx, ecx, times_4, 10000));
+ __ frstor(Operand(ebx, ecx, times_4, 10000));
+
// xchg.
{
__ xchg(eax, eax);
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index bc503b58c6..ceceff63a0 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -30,7 +30,7 @@
#include "src/api.h"
#include "src/debug.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 9857f9d88a..692861cfe4 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -47,94 +47,6 @@ typedef uint32_t (*HASH_FUNCTION)();
#define __ masm->
-void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
- // GenerateHashInit takes the first character as an argument so it can't
- // handle the zero length string.
- DCHECK(string.length() > 0);
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
- __ push(ebx);
- __ push(ecx);
- __ mov(eax, Immediate(0));
- __ mov(ebx, Immediate(string.at(0)));
- StringHelper::GenerateHashInit(masm, eax, ebx, ecx);
- for (int i = 1; i < string.length(); i++) {
- __ mov(ebx, Immediate(string.at(i)));
- StringHelper::GenerateHashAddCharacter(masm, eax, ebx, ecx);
- }
- StringHelper::GenerateHashGetHash(masm, eax, ecx);
- __ pop(ecx);
- __ pop(ebx);
- __ Ret();
-#elif V8_TARGET_ARCH_X64
- __ pushq(kRootRegister);
- __ InitializeRootRegister();
- __ pushq(rbx);
- __ pushq(rcx);
- __ movp(rax, Immediate(0));
- __ movp(rbx, Immediate(string.at(0)));
- StringHelper::GenerateHashInit(masm, rax, rbx, rcx);
- for (int i = 1; i < string.length(); i++) {
- __ movp(rbx, Immediate(string.at(i)));
- StringHelper::GenerateHashAddCharacter(masm, rax, rbx, rcx);
- }
- StringHelper::GenerateHashGetHash(masm, rax, rcx);
- __ popq(rcx);
- __ popq(rbx);
- __ popq(kRootRegister);
- __ Ret();
-#elif V8_TARGET_ARCH_ARM
- __ push(kRootRegister);
- __ InitializeRootRegister();
-
- __ mov(r0, Operand(0));
- __ mov(ip, Operand(string.at(0)));
- StringHelper::GenerateHashInit(masm, r0, ip);
- for (int i = 1; i < string.length(); i++) {
- __ mov(ip, Operand(string.at(i)));
- StringHelper::GenerateHashAddCharacter(masm, r0, ip);
- }
- StringHelper::GenerateHashGetHash(masm, r0);
- __ pop(kRootRegister);
- __ mov(pc, Operand(lr));
-#elif V8_TARGET_ARCH_ARM64
- // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
- // csp is initialized by the calling (C++) code.
- Register old_stack_pointer = __ StackPointer();
- __ SetStackPointer(csp);
- __ Push(root, xzr);
- __ InitializeRootRegister();
- __ Mov(x0, 0);
- __ Mov(x10, Operand(string.at(0)));
- StringHelper::GenerateHashInit(masm, x0, x10);
- for (int i = 1; i < string.length(); i++) {
- __ Mov(x10, Operand(string.at(i)));
- StringHelper::GenerateHashAddCharacter(masm, x0, x10);
- }
- StringHelper::GenerateHashGetHash(masm, x0, x10);
- __ Pop(xzr, root);
- __ Ret();
- __ SetStackPointer(old_stack_pointer);
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- __ push(kRootRegister);
- __ InitializeRootRegister();
-
- __ mov(v0, zero_reg);
- __ li(t1, Operand(string.at(0)));
- StringHelper::GenerateHashInit(masm, v0, t1);
- for (int i = 1; i < string.length(); i++) {
- __ li(t1, Operand(string.at(i)));
- StringHelper::GenerateHashAddCharacter(masm, v0, t1);
- }
- StringHelper::GenerateHashGetHash(masm, v0);
- __ pop(kRootRegister);
- __ jr(ra);
- __ nop();
-#else
-#error Unsupported architecture.
-#endif
-}
-
-
void generate(MacroAssembler* masm, uint32_t key) {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
__ push(ebx);
@@ -184,44 +96,6 @@ void generate(MacroAssembler* masm, uint32_t key) {
}
-void check(i::Vector<const uint8_t> string) {
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
-
- v8::internal::byte buffer[2048];
- MacroAssembler masm(isolate, buffer, sizeof buffer);
-
- generate(&masm, string);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
- Handle<Code> code = factory->NewCode(desc,
- Code::ComputeFlags(Code::STUB),
- undefined);
- CHECK(code->IsCode());
-
- HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
- Handle<String> v8_string =
- factory->NewStringFromOneByte(string).ToHandleChecked();
- v8_string->set_hash_field(String::kEmptyHashField);
-#ifdef USE_SIMULATOR
- uint32_t codegen_hash = static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
-#else
- uint32_t codegen_hash = hash();
-#endif
- uint32_t runtime_hash = v8_string->Hash();
- CHECK(runtime_hash == codegen_hash);
-}
-
-
-void check(i::Vector<const char> s) {
- check(i::Vector<const uint8_t>::cast(s));
-}
-
-
void check(uint32_t key) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -253,38 +127,11 @@ void check(uint32_t key) {
}
-void check_twochars(uint8_t a, uint8_t b) {
- uint8_t ab[2] = {a, b};
- check(i::Vector<const uint8_t>(ab, 2));
-}
-
-
static uint32_t PseudoRandom(uint32_t i, uint32_t j) {
return ~(~((i * 781) ^ (j * 329)));
}
-TEST(StringHash) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Context::Scope context_scope(v8::Context::New(isolate));
-
- for (uint8_t a = 0; a < String::kMaxOneByteCharCode; a++) {
- // Numbers are hashed differently.
- if (a >= '0' && a <= '9') continue;
- for (uint8_t b = 0; b < String::kMaxOneByteCharCode; b++) {
- if (b >= '0' && b <= '9') continue;
- check_twochars(a, b);
- }
- }
- check(i::Vector<const char>("*", 1));
- check(i::Vector<const char>(".zZ", 3));
- check(i::Vector<const char>("muc", 3));
- check(i::Vector<const char>("(>'_')>", 7));
- check(i::Vector<const char>("-=[ vee eight ftw ]=-", 21));
-}
-
-
TEST(NumberHash) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index e456323bae..8f9b484391 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -441,8 +441,8 @@ TEST(HeapSnapshotConsString) {
CHECK_EQ(1, global->InternalFieldCount());
i::Factory* factory = CcTest::i_isolate()->factory();
- i::Handle<i::String> first = factory->NewStringFromStaticAscii("0123456789");
- i::Handle<i::String> second = factory->NewStringFromStaticAscii("0123456789");
+ i::Handle<i::String> first = factory->NewStringFromStaticChars("0123456789");
+ i::Handle<i::String> second = factory->NewStringFromStaticChars("0123456789");
i::Handle<i::String> cons_string =
factory->NewConsString(first, second).ToHandleChecked();
@@ -875,9 +875,9 @@ class TestJSONStream : public v8::OutputStream {
int abort_countdown_;
};
-class AsciiResource: public v8::String::ExternalAsciiStringResource {
+class OneByteResource : public v8::String::ExternalOneByteStringResource {
public:
- explicit AsciiResource(i::Vector<char> string): data_(string.start()) {
+ explicit OneByteResource(i::Vector<char> string) : data_(string.start()) {
length_ = string.length();
}
virtual const char* data() const { return data_; }
@@ -913,7 +913,7 @@ TEST(HeapSnapshotJSONSerialization) {
stream.WriteTo(json);
// Verify that snapshot string is valid JSON.
- AsciiResource* json_res = new AsciiResource(json);
+ OneByteResource* json_res = new OneByteResource(json);
v8::Local<v8::String> json_string =
v8::String::NewExternal(env->GetIsolate(), json_res);
env->Global()->Set(v8_str("json_snapshot"), json_string);
@@ -1863,12 +1863,16 @@ TEST(GetConstructorName) {
"Constructor2", i::V8HeapExplorer::GetConstructorName(*js_obj2)));
v8::Local<v8::Object> obj3 = js_global->Get(v8_str("obj3")).As<v8::Object>();
i::Handle<i::JSObject> js_obj3 = v8::Utils::OpenHandle(*obj3);
- CHECK_EQ(0, StringCmp(
- "Constructor3", i::V8HeapExplorer::GetConstructorName(*js_obj3)));
+ // TODO(verwaest): Restore to Constructor3 once supported by the
+ // heap-snapshot-generator.
+ CHECK_EQ(
+ 0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(*js_obj3)));
v8::Local<v8::Object> obj4 = js_global->Get(v8_str("obj4")).As<v8::Object>();
i::Handle<i::JSObject> js_obj4 = v8::Utils::OpenHandle(*obj4);
- CHECK_EQ(0, StringCmp(
- "Constructor4", i::V8HeapExplorer::GetConstructorName(*js_obj4)));
+ // TODO(verwaest): Restore to Constructor4 once supported by the
+ // heap-snapshot-generator.
+ CHECK_EQ(
+ 0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(*js_obj4)));
v8::Local<v8::Object> obj5 = js_global->Get(v8_str("obj5")).As<v8::Object>();
i::Handle<i::JSObject> js_obj5 = v8::Utils::OpenHandle(*obj5);
CHECK_EQ(0, StringCmp(
@@ -1983,6 +1987,46 @@ TEST(HiddenPropertiesFastCase) {
}
+TEST(AccessorInfo) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ CompileRun("function foo(x) { }\n");
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("AccessorInfoTest"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* foo =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
+ CHECK_NE(NULL, foo);
+ const v8::HeapGraphNode* map =
+ GetProperty(foo, v8::HeapGraphEdge::kInternal, "map");
+ CHECK_NE(NULL, map);
+ const v8::HeapGraphNode* descriptors =
+ GetProperty(map, v8::HeapGraphEdge::kInternal, "descriptors");
+ CHECK_NE(NULL, descriptors);
+ const v8::HeapGraphNode* length_name =
+ GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "2");
+ CHECK_NE(NULL, length_name);
+ CHECK_EQ("length", *v8::String::Utf8Value(length_name->GetName()));
+ const v8::HeapGraphNode* length_accessor =
+ GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "4");
+ CHECK_NE(NULL, length_accessor);
+ CHECK_EQ("system / ExecutableAccessorInfo",
+ *v8::String::Utf8Value(length_accessor->GetName()));
+ const v8::HeapGraphNode* name =
+ GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "name");
+ CHECK_NE(NULL, name);
+ const v8::HeapGraphNode* getter =
+ GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "getter");
+ CHECK_NE(NULL, getter);
+ const v8::HeapGraphNode* setter =
+ GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "setter");
+ CHECK_NE(NULL, setter);
+}
+
+
bool HasWeakEdge(const v8::HeapGraphNode* node) {
for (int i = 0; i < node->GetChildrenCount(); ++i) {
const v8::HeapGraphEdge* handle_edge = node->GetChild(i);
@@ -2307,7 +2351,7 @@ TEST(CheckCodeNames) {
"::(ArraySingleArgumentConstructorStub code)"
};
const v8::HeapGraphNode* node = GetNodeByPath(snapshot,
- stub_path, ARRAY_SIZE(stub_path));
+ stub_path, arraysize(stub_path));
CHECK_NE(NULL, node);
const char* builtin_path1[] = {
@@ -2315,18 +2359,15 @@ TEST(CheckCodeNames) {
"::(Builtins)",
"::(KeyedLoadIC_Generic builtin)"
};
- node = GetNodeByPath(snapshot, builtin_path1, ARRAY_SIZE(builtin_path1));
+ node = GetNodeByPath(snapshot, builtin_path1, arraysize(builtin_path1));
CHECK_NE(NULL, node);
- const char* builtin_path2[] = {
- "::(GC roots)",
- "::(Builtins)",
- "::(CompileUnoptimized builtin)"
- };
- node = GetNodeByPath(snapshot, builtin_path2, ARRAY_SIZE(builtin_path2));
+ const char* builtin_path2[] = {"::(GC roots)", "::(Builtins)",
+ "::(CompileLazy builtin)"};
+ node = GetNodeByPath(snapshot, builtin_path2, arraysize(builtin_path2));
CHECK_NE(NULL, node);
v8::String::Utf8Value node_name(node->GetName());
- CHECK_EQ("(CompileUnoptimized builtin)", *node_name);
+ CHECK_EQ("(CompileLazy builtin)", *node_name);
}
@@ -2416,7 +2457,7 @@ TEST(ArrayGrowLeftTrim) {
tracker->trace_tree()->Print(tracker);
AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ FindNode(tracker, Vector<const char*>(names, arraysize(names)));
CHECK_NE(NULL, node);
CHECK_GE(node->allocation_count(), 2);
CHECK_GE(node->allocation_size(), 4 * 5);
@@ -2443,7 +2484,7 @@ TEST(TrackHeapAllocations) {
const char* names[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ FindNode(tracker, Vector<const char*>(names, arraysize(names)));
CHECK_NE(NULL, node);
CHECK_GE(node->allocation_count(), 100);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2492,7 +2533,7 @@ TEST(TrackBumpPointerAllocations) {
tracker->trace_tree()->Print(tracker);
AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ FindNode(tracker, Vector<const char*>(names, arraysize(names)));
CHECK_NE(NULL, node);
CHECK_GE(node->allocation_count(), 100);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2518,7 +2559,7 @@ TEST(TrackBumpPointerAllocations) {
tracker->trace_tree()->Print(tracker);
AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ FindNode(tracker, Vector<const char*>(names, arraysize(names)));
CHECK_NE(NULL, node);
CHECK_LT(node->allocation_count(), 100);
@@ -2548,7 +2589,7 @@ TEST(TrackV8ApiAllocation) {
tracker->trace_tree()->Print(tracker);
AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ FindNode(tracker, Vector<const char*>(names, arraysize(names)));
CHECK_NE(NULL, node);
CHECK_GE(node->allocation_count(), 2);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2650,7 +2691,7 @@ TEST(BoxObject) {
v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
i::Factory* factory = CcTest::i_isolate()->factory();
- i::Handle<i::String> string = factory->NewStringFromStaticAscii("string");
+ i::Handle<i::String> string = factory->NewStringFromStaticChars("string");
i::Handle<i::Object> box = factory->NewBox(string);
global->Set(0, v8::ToApiHandle<v8::Object>(box));
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index ab000dc6a6..e526761b9c 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -34,8 +34,8 @@
#include "src/execution.h"
#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -185,7 +185,7 @@ TEST(HeapObjects) {
CHECK(factory->nan_value()->IsNumber());
CHECK(std::isnan(factory->nan_value()->Number()));
- Handle<String> s = factory->NewStringFromStaticAscii("fisk hest ");
+ Handle<String> s = factory->NewStringFromStaticChars("fisk hest ");
CHECK(s->IsString());
CHECK_EQ(10, s->length());
@@ -341,7 +341,7 @@ TEST(GlobalHandles) {
{
HandleScope scope(isolate);
- Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+ Handle<Object> i = factory->NewStringFromStaticChars("fisk");
Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*i);
@@ -396,7 +396,7 @@ TEST(WeakGlobalHandlesScavenge) {
{
HandleScope scope(isolate);
- Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+ Handle<Object> i = factory->NewStringFromStaticChars("fisk");
Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*i);
@@ -438,7 +438,7 @@ TEST(WeakGlobalHandlesMark) {
{
HandleScope scope(isolate);
- Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+ Handle<Object> i = factory->NewStringFromStaticChars("fisk");
Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*i);
@@ -484,7 +484,7 @@ TEST(DeleteWeakGlobalHandle) {
{
HandleScope scope(isolate);
- Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+ Handle<Object> i = factory->NewStringFromStaticChars("fisk");
h = global_handles->Create(*i);
}
@@ -870,33 +870,34 @@ TEST(StringAllocation) {
const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 };
for (int length = 0; length < 100; length++) {
v8::HandleScope scope(CcTest::isolate());
- char* non_ascii = NewArray<char>(3 * length + 1);
- char* ascii = NewArray<char>(length + 1);
- non_ascii[3 * length] = 0;
- ascii[length] = 0;
+ char* non_one_byte = NewArray<char>(3 * length + 1);
+ char* one_byte = NewArray<char>(length + 1);
+ non_one_byte[3 * length] = 0;
+ one_byte[length] = 0;
for (int i = 0; i < length; i++) {
- ascii[i] = 'a';
- non_ascii[3 * i] = chars[0];
- non_ascii[3 * i + 1] = chars[1];
- non_ascii[3 * i + 2] = chars[2];
+ one_byte[i] = 'a';
+ non_one_byte[3 * i] = chars[0];
+ non_one_byte[3 * i + 1] = chars[1];
+ non_one_byte[3 * i + 2] = chars[2];
}
- Handle<String> non_ascii_sym =
- factory->InternalizeUtf8String(
- Vector<const char>(non_ascii, 3 * length));
- CHECK_EQ(length, non_ascii_sym->length());
- Handle<String> ascii_sym =
- factory->InternalizeOneByteString(OneByteVector(ascii, length));
- CHECK_EQ(length, ascii_sym->length());
- Handle<String> non_ascii_str = factory->NewStringFromUtf8(
- Vector<const char>(non_ascii, 3 * length)).ToHandleChecked();
- non_ascii_str->Hash();
- CHECK_EQ(length, non_ascii_str->length());
- Handle<String> ascii_str = factory->NewStringFromUtf8(
- Vector<const char>(ascii, length)).ToHandleChecked();
- ascii_str->Hash();
- CHECK_EQ(length, ascii_str->length());
- DeleteArray(non_ascii);
- DeleteArray(ascii);
+ Handle<String> non_one_byte_sym = factory->InternalizeUtf8String(
+ Vector<const char>(non_one_byte, 3 * length));
+ CHECK_EQ(length, non_one_byte_sym->length());
+ Handle<String> one_byte_sym =
+ factory->InternalizeOneByteString(OneByteVector(one_byte, length));
+ CHECK_EQ(length, one_byte_sym->length());
+ Handle<String> non_one_byte_str =
+ factory->NewStringFromUtf8(Vector<const char>(non_one_byte, 3 * length))
+ .ToHandleChecked();
+ non_one_byte_str->Hash();
+ CHECK_EQ(length, non_one_byte_str->length());
+ Handle<String> one_byte_str =
+ factory->NewStringFromUtf8(Vector<const char>(one_byte, length))
+ .ToHandleChecked();
+ one_byte_str->Hash();
+ CHECK_EQ(length, one_byte_str->length());
+ DeleteArray(non_one_byte);
+ DeleteArray(one_byte);
}
}
@@ -934,10 +935,9 @@ TEST(Iteration) {
TENURED);
// Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
+ objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij");
objs[next_objs_index++] =
- factory->NewStringFromStaticAscii("abcdefghij");
- objs[next_objs_index++] =
- factory->NewStringFromStaticAscii("abcdefghij", TENURED);
+ factory->NewStringFromStaticChars("abcdefghij", TENURED);
// Allocate a large string (for large object space).
int large_size = Page::kMaxRegularHeapObjectSize + 1;
@@ -992,11 +992,8 @@ TEST(Regression39128) {
// that region dirty marks are updated correctly.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
- Handle<JSFunction> object_ctor(
- CcTest::i_isolate()->native_context()->object_function());
- CHECK(object_ctor->has_initial_map());
// Create a map with single inobject property.
- Handle<Map> my_map = Map::Create(object_ctor, 1);
+ Handle<Map> my_map = Map::Create(CcTest::i_isolate(), 1);
int n_properties = my_map->inobject_properties();
CHECK_GT(n_properties, 0);
@@ -1052,53 +1049,61 @@ TEST(Regression39128) {
}
-TEST(TestCodeFlushing) {
+UNINITIALIZED_TEST(TestCodeFlushing) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_optimize_for_size = false;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- v8::HandleScope scope(CcTest::isolate());
- const char* source = "function foo() {"
- " var x = 42;"
- " var y = 42;"
- " var z = x + y;"
- "};"
- "foo()";
- Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ isolate->Enter();
+ Factory* factory = i_isolate->factory();
+ {
+ v8::HandleScope scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ const char* source =
+ "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+
+ // This compile will add the code to the compilation cache.
+ {
+ v8::HandleScope scope(isolate);
+ CompileRun(source);
+ }
- // This compile will add the code to the compilation cache.
- { v8::HandleScope scope(CcTest::isolate());
- CompileRun(source);
- }
+ // Check function is compiled.
+ Handle<Object> func_value = Object::GetProperty(i_isolate->global_object(),
+ foo_name).ToHandleChecked();
+ CHECK(func_value->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
+ CHECK(function->shared()->is_compiled());
- // Check function is compiled.
- Handle<Object> func_value = Object::GetProperty(
- CcTest::i_isolate()->global_object(), foo_name).ToHandleChecked();
- CHECK(func_value->IsJSFunction());
- Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
- CHECK(function->shared()->is_compiled());
+ // The code will survive at least two GCs.
+ i_isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ i_isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CHECK(function->shared()->is_compiled());
- // The code will survive at least two GCs.
- CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK(function->shared()->is_compiled());
+ // Simulate several GCs that use full marking.
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ i_isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ }
- // Simulate several GCs that use full marking.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ // foo should no longer be in the compilation cache
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+ CHECK(!function->is_compiled() || function->IsOptimized());
+ // Call foo to get it recompiled.
+ CompileRun("foo()");
+ CHECK(function->shared()->is_compiled());
+ CHECK(function->is_compiled());
}
-
- // foo should no longer be in the compilation cache
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
- // Call foo to get it recompiled.
- CompileRun("foo()");
- CHECK(function->shared()->is_compiled());
- CHECK(function->is_compiled());
+ isolate->Exit();
+ isolate->Dispose();
}
@@ -1684,7 +1689,7 @@ static void FillUpNewSpace(NewSpace* new_space) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
AlwaysAllocateScope always_allocate(isolate);
- intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
+ intptr_t available = new_space->Capacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED)));
@@ -1707,20 +1712,20 @@ TEST(GrowAndShrinkNewSpace) {
// Explicitly growing should double the space capacity.
intptr_t old_capacity, new_capacity;
- old_capacity = new_space->Capacity();
+ old_capacity = new_space->TotalCapacity();
new_space->Grow();
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(2 * old_capacity == new_capacity);
- old_capacity = new_space->Capacity();
+ old_capacity = new_space->TotalCapacity();
FillUpNewSpace(new_space);
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(old_capacity == new_capacity);
// Explicitly shrinking should not affect space capacity.
- old_capacity = new_space->Capacity();
+ old_capacity = new_space->TotalCapacity();
new_space->Shrink();
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(old_capacity == new_capacity);
// Let the scavenger empty the new space.
@@ -1728,17 +1733,17 @@ TEST(GrowAndShrinkNewSpace) {
CHECK_LE(new_space->Size(), old_capacity);
// Explicitly shrinking should halve the space capacity.
- old_capacity = new_space->Capacity();
+ old_capacity = new_space->TotalCapacity();
new_space->Shrink();
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(old_capacity == 2 * new_capacity);
// Consecutive shrinking should not affect space capacity.
- old_capacity = new_space->Capacity();
+ old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_space->Shrink();
new_space->Shrink();
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(old_capacity == new_capacity);
}
@@ -1757,13 +1762,13 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
v8::HandleScope scope(CcTest::isolate());
NewSpace* new_space = heap->new_space();
intptr_t old_capacity, new_capacity;
- old_capacity = new_space->Capacity();
+ old_capacity = new_space->TotalCapacity();
new_space->Grow();
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(2 * old_capacity == new_capacity);
FillUpNewSpace(new_space);
heap->CollectAllAvailableGarbage();
- new_capacity = new_space->Capacity();
+ new_capacity = new_space->TotalCapacity();
CHECK(old_capacity == new_capacity);
}
@@ -2853,6 +2858,7 @@ TEST(TransitionArrayShrinksDuringAllocToOnePropertyFound) {
root = GetByName("root");
AddPropertyTo(0, root, "prop9");
+ CcTest::i_isolate()->heap()->CollectGarbage(OLD_POINTER_SPACE);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3139,7 +3145,7 @@ TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
*v8::Handle<v8::Function>::Cast(
CcTest::global()->Get(v8_str("f"))));
- Handle<FixedArray> feedback_vector(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
int expected_length = FLAG_vector_ics ? 4 : 2;
CHECK_EQ(expected_length, feedback_vector->length());
@@ -3155,7 +3161,7 @@ TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
CHECK_EQ(expected_length, feedback_vector->length());
for (int i = 0; i < expected_length; i++) {
CHECK_EQ(feedback_vector->get(i),
- *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
+ *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
}
}
@@ -3275,7 +3281,7 @@ TEST(IncrementalMarkingClearsPolymorphicIC) {
}
-class SourceResource: public v8::String::ExternalAsciiStringResource {
+class SourceResource : public v8::String::ExternalOneByteStringResource {
public:
explicit SourceResource(const char* data)
: data_(data), length_(strlen(data)) { }
@@ -3297,26 +3303,28 @@ class SourceResource: public v8::String::ExternalAsciiStringResource {
};
-void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
+void ReleaseStackTraceDataTest(v8::Isolate* isolate, const char* source,
+ const char* accessor) {
// Test that the data retained by the Error.stack accessor is released
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
- v8::HandleScope scope(CcTest::isolate());
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
SourceResource* resource = new SourceResource(i::StrDup(source));
{
- v8::HandleScope scope(CcTest::isolate());
+ v8::HandleScope scope(isolate);
v8::Handle<v8::String> source_string =
- v8::String::NewExternal(CcTest::isolate(), resource);
- CcTest::heap()->CollectAllAvailableGarbage();
+ v8::String::NewExternal(isolate, resource);
+ i_isolate->heap()->CollectAllAvailableGarbage();
v8::Script::Compile(source_string)->Run();
CHECK(!resource->IsDisposed());
}
- // CcTest::heap()->CollectAllAvailableGarbage();
+ // i_isolate->heap()->CollectAllAvailableGarbage();
CHECK(!resource->IsDisposed());
CompileRun(accessor);
- CcTest::heap()->CollectAllAvailableGarbage();
+ i_isolate->heap()->CollectAllAvailableGarbage();
// External source has been released.
CHECK(resource->IsDisposed());
@@ -3324,7 +3332,7 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
}
-TEST(ReleaseStackTraceData) {
+UNINITIALIZED_TEST(ReleaseStackTraceData) {
if (i::FLAG_always_opt) {
// TODO(ulan): Remove this once the memory leak via code_next_link is fixed.
// See: https://codereview.chromium.org/181833004/
@@ -3332,46 +3340,52 @@ TEST(ReleaseStackTraceData) {
}
FLAG_use_ic = false; // ICs retain objects.
FLAG_concurrent_recompilation = false;
- CcTest::InitializeVM();
- static const char* source1 = "var error = null; "
- /* Normal Error */ "try { "
- " throw new Error(); "
- "} catch (e) { "
- " error = e; "
- "} ";
- static const char* source2 = "var error = null; "
- /* Stack overflow */ "try { "
- " (function f() { f(); })(); "
- "} catch (e) { "
- " error = e; "
- "} ";
- static const char* source3 = "var error = null; "
- /* Normal Error */ "try { "
- /* as prototype */ " throw new Error(); "
- "} catch (e) { "
- " error = {}; "
- " error.__proto__ = e; "
- "} ";
- static const char* source4 = "var error = null; "
- /* Stack overflow */ "try { "
- /* as prototype */ " (function f() { f(); })(); "
- "} catch (e) { "
- " error = {}; "
- " error.__proto__ = e; "
- "} ";
- static const char* getter = "error.stack";
- static const char* setter = "error.stack = 0";
-
- ReleaseStackTraceDataTest(source1, setter);
- ReleaseStackTraceDataTest(source2, setter);
- // We do not test source3 and source4 with setter, since the setter is
- // supposed to (untypically) write to the receiver, not the holder. This is
- // to emulate the behavior of a data property.
-
- ReleaseStackTraceDataTest(source1, getter);
- ReleaseStackTraceDataTest(source2, getter);
- ReleaseStackTraceDataTest(source3, getter);
- ReleaseStackTraceDataTest(source4, getter);
+ v8::Isolate* isolate = v8::Isolate::New();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ static const char* source1 = "var error = null; "
+ /* Normal Error */ "try { "
+ " throw new Error(); "
+ "} catch (e) { "
+ " error = e; "
+ "} ";
+ static const char* source2 = "var error = null; "
+ /* Stack overflow */ "try { "
+ " (function f() { f(); })(); "
+ "} catch (e) { "
+ " error = e; "
+ "} ";
+ static const char* source3 = "var error = null; "
+ /* Normal Error */ "try { "
+ /* as prototype */ " throw new Error(); "
+ "} catch (e) { "
+ " error = {}; "
+ " error.__proto__ = e; "
+ "} ";
+ static const char* source4 = "var error = null; "
+ /* Stack overflow */ "try { "
+ /* as prototype */ " (function f() { f(); })(); "
+ "} catch (e) { "
+ " error = {}; "
+ " error.__proto__ = e; "
+ "} ";
+ static const char* getter = "error.stack";
+ static const char* setter = "error.stack = 0";
+
+ ReleaseStackTraceDataTest(isolate, source1, setter);
+ ReleaseStackTraceDataTest(isolate, source2, setter);
+ // We do not test source3 and source4 with setter, since the setter is
+ // supposed to (untypically) write to the receiver, not the holder. This is
+ // to emulate the behavior of a data property.
+
+ ReleaseStackTraceDataTest(isolate, source1, getter);
+ ReleaseStackTraceDataTest(isolate, source2, getter);
+ ReleaseStackTraceDataTest(isolate, source3, getter);
+ ReleaseStackTraceDataTest(isolate, source4, getter);
+ }
+ isolate->Dispose();
}
@@ -4050,6 +4064,7 @@ static int GetCodeChainLength(Code* code) {
TEST(NextCodeLinkIsWeak) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_turbo_deoptimization = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
@@ -4348,78 +4363,90 @@ TEST(ArrayShiftSweeping) {
}
-TEST(PromotionQueue) {
+UNINITIALIZED_TEST(PromotionQueue) {
i::FLAG_expose_gc = true;
i::FLAG_max_semi_space_size = 2;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- NewSpace* new_space = heap->new_space();
-
- // In this test we will try to overwrite the promotion queue which is at the
- // end of to-space. To actually make that possible, we need at least two
- // semi-space pages and take advantage of fragementation.
- // (1) Grow semi-space to two pages.
- // (2) Create a few small long living objects and call the scavenger to
- // move them to the other semi-space.
- // (3) Create a huge object, i.e., remainder of first semi-space page and
- // create another huge object which should be of maximum allocatable memory
- // size of the second semi-space page.
- // (4) Call the scavenger again.
- // What will happen is: the scavenger will promote the objects created in (2)
- // and will create promotion queue entries at the end of the second
- // semi-space page during the next scavenge when it promotes the objects to
- // the old generation. The first allocation of (3) will fill up the first
- // semi-space page. The second allocation in (3) will not fit into the first
- // semi-space page, but it will overwrite the promotion queue which are in
- // the second semi-space page. If the right guards are in place, the promotion
- // queue will be evacuated in that case.
-
- // Grow the semi-space to two pages to make semi-space copy overwrite the
- // promotion queue, which will be at the end of the second page.
- intptr_t old_capacity = new_space->Capacity();
- new_space->Grow();
- CHECK(new_space->IsAtMaximumCapacity());
- CHECK(2 * old_capacity == new_space->Capacity());
-
- // Call the scavenger two times to get an empty new space
- heap->CollectGarbage(NEW_SPACE);
- heap->CollectGarbage(NEW_SPACE);
-
- // First create a few objects which will survive a scavenge, and will get
- // promoted to the old generation later on. These objects will create
- // promotion queue entries at the end of the second semi-space page.
- const int number_handles = 12;
- Handle<FixedArray> handles[number_handles];
- for (int i = 0; i < number_handles; i++) {
- handles[i] = isolate->factory()->NewFixedArray(1, NOT_TENURED);
+ v8::Isolate* isolate = v8::Isolate::New();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ Heap* heap = i_isolate->heap();
+ NewSpace* new_space = heap->new_space();
+
+ // In this test we will try to overwrite the promotion queue which is at the
+ // end of to-space. To actually make that possible, we need at least two
+ // semi-space pages and take advantage of fragmentation.
+ // (1) Grow semi-space to two pages.
+ // (2) Create a few small long living objects and call the scavenger to
+ // move them to the other semi-space.
+ // (3) Create a huge object, i.e., remainder of first semi-space page and
+ // create another huge object which should be of maximum allocatable memory
+ // size of the second semi-space page.
+ // (4) Call the scavenger again.
+ // What will happen is: the scavenger will promote the objects created in
+ // (2) and will create promotion queue entries at the end of the second
+ // semi-space page during the next scavenge when it promotes the objects to
+ // the old generation. The first allocation of (3) will fill up the first
+ // semi-space page. The second allocation in (3) will not fit into the
+ // first semi-space page, but it will overwrite the promotion queue which
+ // is in the second semi-space page. If the right guards are in place, the
+ // promotion queue will be evacuated in that case.
+
+ // Grow the semi-space to two pages to make semi-space copy overwrite the
+ // promotion queue, which will be at the end of the second page.
+ intptr_t old_capacity = new_space->TotalCapacity();
+
+ // If we are in a low memory config, we can't grow to two pages and we can't
+ // run this test. This also means the issue we are testing cannot arise, as
+ // there is no fragmentation.
+ if (new_space->IsAtMaximumCapacity()) return;
+
+ new_space->Grow();
+ CHECK(new_space->IsAtMaximumCapacity());
+ CHECK(2 * old_capacity == new_space->TotalCapacity());
+
+ // Call the scavenger two times to get an empty new space
+ heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
+
+ // First create a few objects which will survive a scavenge, and will get
+ // promoted to the old generation later on. These objects will create
+ // promotion queue entries at the end of the second semi-space page.
+ const int number_handles = 12;
+ Handle<FixedArray> handles[number_handles];
+ for (int i = 0; i < number_handles; i++) {
+ handles[i] = i_isolate->factory()->NewFixedArray(1, NOT_TENURED);
+ }
+ heap->CollectGarbage(NEW_SPACE);
+
+ // Create the first huge object which will exactly fit the first semi-space
+ // page.
+ int new_linear_size =
+ static_cast<int>(*heap->new_space()->allocation_limit_address() -
+ *heap->new_space()->allocation_top_address());
+ int length = new_linear_size / kPointerSize - FixedArray::kHeaderSize;
+ Handle<FixedArray> first =
+ i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
+ CHECK(heap->InNewSpace(*first));
+
+ // Create the second huge object of maximum allocatable second semi-space
+ // page size.
+ new_linear_size =
+ static_cast<int>(*heap->new_space()->allocation_limit_address() -
+ *heap->new_space()->allocation_top_address());
+ length = Page::kMaxRegularHeapObjectSize / kPointerSize -
+ FixedArray::kHeaderSize;
+ Handle<FixedArray> second =
+ i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
+ CHECK(heap->InNewSpace(*second));
+
+ // This scavenge will corrupt memory if the promotion queue is not
+ // evacuated.
+ heap->CollectGarbage(NEW_SPACE);
}
- heap->CollectGarbage(NEW_SPACE);
-
- // Create the first huge object which will exactly fit the first semi-space
- // page.
- int new_linear_size = static_cast<int>(
- *heap->new_space()->allocation_limit_address() -
- *heap->new_space()->allocation_top_address());
- int length = new_linear_size / kPointerSize - FixedArray::kHeaderSize;
- Handle<FixedArray> first =
- isolate->factory()->NewFixedArray(length, NOT_TENURED);
- CHECK(heap->InNewSpace(*first));
-
- // Create the second huge object of maximum allocatable second semi-space
- // page size.
- new_linear_size = static_cast<int>(
- *heap->new_space()->allocation_limit_address() -
- *heap->new_space()->allocation_top_address());
- length = Page::kMaxRegularHeapObjectSize / kPointerSize -
- FixedArray::kHeaderSize;
- Handle<FixedArray> second =
- isolate->factory()->NewFixedArray(length, NOT_TENURED);
- CHECK(heap->InNewSpace(*second));
-
- // This scavenge will corrupt memory if the promotion queue is not evacuated.
- heap->CollectGarbage(NEW_SPACE);
+ isolate->Dispose();
}
@@ -4431,9 +4458,9 @@ TEST(Regress388880) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
- Handle<Map> map1 = Map::Create(isolate->object_function(), 1);
+ Handle<Map> map1 = Map::Create(isolate, 1);
Handle<Map> map2 =
- Map::CopyWithField(map1, factory->NewStringFromStaticAscii("foo"),
+ Map::CopyWithField(map1, factory->NewStringFromStaticChars("foo"),
HeapType::Any(isolate), NONE, Representation::Tagged(),
OMIT_TRANSITION).ToHandleChecked();
diff --git a/deps/v8/test/cctest/test-libplatform-default-platform.cc b/deps/v8/test/cctest/test-libplatform-default-platform.cc
deleted file mode 100644
index dac6db2a00..0000000000
--- a/deps/v8/test/cctest/test-libplatform-default-platform.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/libplatform/default-platform.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-libplatform.h"
-
-using namespace v8::internal;
-using namespace v8::platform;
-
-
-TEST(DefaultPlatformMessagePump) {
- TaskCounter task_counter;
-
- DefaultPlatform platform;
-
- TestTask* task = new TestTask(&task_counter, true);
-
- CHECK(!platform.PumpMessageLoop(CcTest::isolate()));
-
- platform.CallOnForegroundThread(CcTest::isolate(), task);
-
- CHECK_EQ(1, task_counter.GetCount());
- CHECK(platform.PumpMessageLoop(CcTest::isolate()));
- CHECK_EQ(0, task_counter.GetCount());
- CHECK(!platform.PumpMessageLoop(CcTest::isolate()));
-}
diff --git a/deps/v8/test/cctest/test-libplatform.h b/deps/v8/test/cctest/test-libplatform.h
deleted file mode 100644
index 67147f33e6..0000000000
--- a/deps/v8/test/cctest/test-libplatform.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef TEST_LIBPLATFORM_H_
-#define TEST_LIBPLATFORM_H_
-
-#include "src/v8.h"
-
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-using namespace v8::platform;
-
-class TaskCounter {
- public:
- TaskCounter() : counter_(0) {}
- ~TaskCounter() { CHECK_EQ(0, counter_); }
-
- int GetCount() const {
- v8::base::LockGuard<v8::base::Mutex> guard(&lock_);
- return counter_;
- }
-
- void Inc() {
- v8::base::LockGuard<v8::base::Mutex> guard(&lock_);
- ++counter_;
- }
-
- void Dec() {
- v8::base::LockGuard<v8::base::Mutex> guard(&lock_);
- --counter_;
- }
-
- private:
- mutable v8::base::Mutex lock_;
- int counter_;
-
- DISALLOW_COPY_AND_ASSIGN(TaskCounter);
-};
-
-
-class TestTask : public v8::Task {
- public:
- TestTask(TaskCounter* task_counter, bool expected_to_run)
- : task_counter_(task_counter),
- expected_to_run_(expected_to_run),
- executed_(false) {
- task_counter_->Inc();
- }
-
- explicit TestTask(TaskCounter* task_counter)
- : task_counter_(task_counter), expected_to_run_(false), executed_(false) {
- task_counter_->Inc();
- }
-
- virtual ~TestTask() {
- CHECK_EQ(expected_to_run_, executed_);
- task_counter_->Dec();
- }
-
- // v8::Task implementation.
- virtual void Run() V8_OVERRIDE { executed_ = true; }
-
- private:
- TaskCounter* task_counter_;
- bool expected_to_run_;
- bool executed_;
-
- DISALLOW_COPY_AND_ASSIGN(TestTask);
-};
-
-
-class TestWorkerThread : public v8::base::Thread {
- public:
- explicit TestWorkerThread(v8::Task* task)
- : Thread(Options("libplatform TestWorkerThread")),
- semaphore_(0),
- task_(task) {}
- virtual ~TestWorkerThread() {}
-
- void Signal() { semaphore_.Signal(); }
-
- // Thread implementation.
- virtual void Run() V8_OVERRIDE {
- semaphore_.Wait();
- if (task_) {
- task_->Run();
- delete task_;
- }
- }
-
- private:
- v8::base::Semaphore semaphore_;
- v8::Task* task_;
-
- DISALLOW_COPY_AND_ASSIGN(TestWorkerThread);
-};
-
-#endif // TEST_LIBPLATFORM_H_
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index f5c22743bd..6a5f0b2997 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -158,7 +158,6 @@ void CompareStrings(const char* s1, const char* s2,
// --- T h e A c t u a l T e s t s
TEST(LiveEditDiffer) {
- v8::internal::V8::Initialize(NULL);
CompareStrings("zz1zzz12zz123zzz", "zzzzzzzzzz", 6);
CompareStrings("zz1zzz12zz123zzz", "zz0zzz0zz0zzz", 9);
CompareStrings("123456789", "987654321", 16);
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index d72e6f0e1e..482f89f9c4 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -61,9 +61,11 @@ class ScopedLoggerInitializer {
temp_file_(NULL),
// Need to run this prior to creating the scope.
trick_to_run_init_flags_(init_flags_()),
- scope_(CcTest::isolate()),
- env_(v8::Context::New(CcTest::isolate())),
- logger_(CcTest::i_isolate()->logger()) {
+ isolate_(v8::Isolate::New()),
+ isolate_scope_(isolate_),
+ scope_(isolate_),
+ env_(v8::Context::New(isolate_)),
+ logger_(reinterpret_cast<i::Isolate*>(isolate_)->logger()) {
env_->Enter();
}
@@ -77,6 +79,8 @@ class ScopedLoggerInitializer {
v8::Handle<v8::Context>& env() { return env_; }
+ v8::Isolate* isolate() { return isolate_; }
+
Logger* logger() { return logger_; }
FILE* StopLoggingGetTempFile() {
@@ -100,6 +104,8 @@ class ScopedLoggerInitializer {
const bool saved_prof_;
FILE* temp_file_;
const bool trick_to_run_init_flags_;
+ v8::Isolate* isolate_;
+ v8::Isolate::Scope isolate_scope_;
v8::HandleScope scope_;
v8::Handle<v8::Context> env_;
Logger* logger_;
@@ -330,41 +336,41 @@ static void ObjMethod1(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(LogCallbacks) {
- v8::Isolate* isolate = CcTest::isolate();
- ScopedLoggerInitializer initialize_logger;
- Logger* logger = initialize_logger.logger();
-
- v8::Local<v8::FunctionTemplate> obj =
- v8::Local<v8::FunctionTemplate>::New(isolate,
- v8::FunctionTemplate::New(isolate));
- obj->SetClassName(v8_str("Obj"));
- v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
- v8::Local<v8::Signature> signature =
- v8::Signature::New(isolate, obj);
- proto->Set(v8_str("method1"),
- v8::FunctionTemplate::New(isolate,
- ObjMethod1,
- v8::Handle<v8::Value>(),
- signature),
- static_cast<v8::PropertyAttribute>(v8::DontDelete));
-
- initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
- CompileRun("Obj.prototype.method1.toString();");
-
- logger->LogCompiledFunctions();
-
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
-
- i::EmbeddedVector<char, 100> ref_data;
- i::SNPrintF(ref_data,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
- reinterpret_cast<intptr_t>(ObjMethod1));
-
- CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
- log.Dispose();
+ v8::Isolate* isolate;
+ {
+ ScopedLoggerInitializer initialize_logger;
+ isolate = initialize_logger.isolate();
+ Logger* logger = initialize_logger.logger();
+
+ v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
+ isolate, v8::FunctionTemplate::New(isolate));
+ obj->SetClassName(v8_str("Obj"));
+ v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
+ v8::Local<v8::Signature> signature = v8::Signature::New(isolate, obj);
+ proto->Set(v8_str("method1"),
+ v8::FunctionTemplate::New(isolate, ObjMethod1,
+ v8::Handle<v8::Value>(), signature),
+ static_cast<v8::PropertyAttribute>(v8::DontDelete));
+
+ initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
+ CompileRun("Obj.prototype.method1.toString();");
+
+ logger->LogCompiledFunctions();
+
+ bool exists = false;
+ i::Vector<const char> log(
+ i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+ CHECK(exists);
+
+ i::EmbeddedVector<char, 100> ref_data;
+ i::SNPrintF(ref_data,
+ "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
+ reinterpret_cast<intptr_t>(ObjMethod1));
+
+ CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
+ log.Dispose();
+ }
+ isolate->Dispose();
}
@@ -383,46 +389,49 @@ static void Prop2Getter(v8::Local<v8::String> property,
TEST(LogAccessorCallbacks) {
- v8::Isolate* isolate = CcTest::isolate();
- ScopedLoggerInitializer initialize_logger;
- Logger* logger = initialize_logger.logger();
-
- v8::Local<v8::FunctionTemplate> obj =
- v8::Local<v8::FunctionTemplate>::New(isolate,
- v8::FunctionTemplate::New(isolate));
- obj->SetClassName(v8_str("Obj"));
- v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
- inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
- inst->SetAccessor(v8_str("prop2"), Prop2Getter);
-
- logger->LogAccessorCallbacks();
-
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
-
- EmbeddedVector<char, 100> prop1_getter_record;
- i::SNPrintF(prop1_getter_record,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
- reinterpret_cast<intptr_t>(Prop1Getter));
- CHECK_NE(NULL,
- StrNStr(log.start(), prop1_getter_record.start(), log.length()));
-
- EmbeddedVector<char, 100> prop1_setter_record;
- i::SNPrintF(prop1_setter_record,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
- reinterpret_cast<intptr_t>(Prop1Setter));
- CHECK_NE(NULL,
- StrNStr(log.start(), prop1_setter_record.start(), log.length()));
-
- EmbeddedVector<char, 100> prop2_getter_record;
- i::SNPrintF(prop2_getter_record,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
- reinterpret_cast<intptr_t>(Prop2Getter));
- CHECK_NE(NULL,
- StrNStr(log.start(), prop2_getter_record.start(), log.length()));
- log.Dispose();
+ v8::Isolate* isolate;
+ {
+ ScopedLoggerInitializer initialize_logger;
+ isolate = initialize_logger.isolate();
+ Logger* logger = initialize_logger.logger();
+
+ v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
+ isolate, v8::FunctionTemplate::New(isolate));
+ obj->SetClassName(v8_str("Obj"));
+ v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
+ inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
+ inst->SetAccessor(v8_str("prop2"), Prop2Getter);
+
+ logger->LogAccessorCallbacks();
+
+ bool exists = false;
+ i::Vector<const char> log(
+ i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+ CHECK(exists);
+
+ EmbeddedVector<char, 100> prop1_getter_record;
+ i::SNPrintF(prop1_getter_record,
+ "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
+ reinterpret_cast<intptr_t>(Prop1Getter));
+ CHECK_NE(NULL,
+ StrNStr(log.start(), prop1_getter_record.start(), log.length()));
+
+ EmbeddedVector<char, 100> prop1_setter_record;
+ i::SNPrintF(prop1_setter_record,
+ "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
+ reinterpret_cast<intptr_t>(Prop1Setter));
+ CHECK_NE(NULL,
+ StrNStr(log.start(), prop1_setter_record.start(), log.length()));
+
+ EmbeddedVector<char, 100> prop2_getter_record;
+ i::SNPrintF(prop2_getter_record,
+ "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
+ reinterpret_cast<intptr_t>(Prop2Getter));
+ CHECK_NE(NULL,
+ StrNStr(log.start(), prop2_getter_record.start(), log.length()));
+ log.Dispose();
+ }
+ isolate->Dispose();
}
@@ -439,57 +448,63 @@ TEST(EquivalenceOfLoggingAndTraversal) {
// are using V8.
// Start with profiling to capture all code events from the beginning.
- ScopedLoggerInitializer initialize_logger;
- Logger* logger = initialize_logger.logger();
-
- // Compile and run a function that creates other functions.
- CompileRun(
- "(function f(obj) {\n"
- " obj.test =\n"
- " (function a(j) { return function b() { return j; } })(100);\n"
- "})(this);");
- logger->StopProfiler();
- CcTest::heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
- logger->StringEvent("test-logging-done", "");
-
- // Iterate heap to find compiled functions, will write to log.
- logger->LogCompiledFunctions();
- logger->StringEvent("test-traversal-done", "");
-
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
- v8::Handle<v8::String> log_str = v8::String::NewFromUtf8(
- CcTest::isolate(), log.start(), v8::String::kNormalString, log.length());
- initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
-
- i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
- v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
- CcTest::isolate(), reinterpret_cast<const char*>(source.start()),
- v8::String::kNormalString, source.length());
- v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
- if (script.IsEmpty()) {
- v8::String::Utf8Value exception(try_catch.Exception());
- printf("compile: %s\n", *exception);
- CHECK(false);
- }
- v8::Handle<v8::Value> result = script->Run();
- if (result.IsEmpty()) {
- v8::String::Utf8Value exception(try_catch.Exception());
- printf("run: %s\n", *exception);
- CHECK(false);
- }
- // The result either be a "true" literal or problem description.
- if (!result->IsTrue()) {
- v8::Local<v8::String> s = result->ToString();
- i::ScopedVector<char> data(s->Utf8Length() + 1);
- CHECK_NE(NULL, data.start());
- s->WriteUtf8(data.start());
- printf("%s\n", data.start());
- // Make sure that our output is written prior crash due to CHECK failure.
- fflush(stdout);
- CHECK(false);
+ v8::Isolate* isolate;
+ {
+ ScopedLoggerInitializer initialize_logger;
+ isolate = initialize_logger.isolate();
+ Logger* logger = initialize_logger.logger();
+
+ // Compile and run a function that creates other functions.
+ CompileRun(
+ "(function f(obj) {\n"
+ " obj.test =\n"
+ " (function a(j) { return function b() { return j; } })(100);\n"
+ "})(this);");
+ logger->StopProfiler();
+ reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
+ i::Heap::kMakeHeapIterableMask);
+ logger->StringEvent("test-logging-done", "");
+
+ // Iterate heap to find compiled functions, will write to log.
+ logger->LogCompiledFunctions();
+ logger->StringEvent("test-traversal-done", "");
+
+ bool exists = false;
+ i::Vector<const char> log(
+ i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+ CHECK(exists);
+ v8::Handle<v8::String> log_str = v8::String::NewFromUtf8(
+ isolate, log.start(), v8::String::kNormalString, log.length());
+ initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
+
+ i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
+ v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
+ isolate, reinterpret_cast<const char*>(source.start()),
+ v8::String::kNormalString, source.length());
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
+ if (script.IsEmpty()) {
+ v8::String::Utf8Value exception(try_catch.Exception());
+ printf("compile: %s\n", *exception);
+ CHECK(false);
+ }
+ v8::Handle<v8::Value> result = script->Run();
+ if (result.IsEmpty()) {
+ v8::String::Utf8Value exception(try_catch.Exception());
+ printf("run: %s\n", *exception);
+ CHECK(false);
+ }
+ // The result will either be a "true" literal or a problem description.
+ if (!result->IsTrue()) {
+ v8::Local<v8::String> s = result->ToString();
+ i::ScopedVector<char> data(s->Utf8Length() + 1);
+ CHECK_NE(NULL, data.start());
+ s->WriteUtf8(data.start());
+ printf("%s\n", data.start());
+ // Make sure that our output is written prior to a crash due to CHECK failure.
+ fflush(stdout);
+ CHECK(false);
+ }
}
+ isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 2cfad0df83..3ca02662fa 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -137,8 +137,6 @@ typedef int (*F5)(void*, void*, void*, void*, void*);
TEST(LoadAndStoreWithRepresentation) {
- v8::internal::V8::Initialize(NULL);
-
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
diff --git a/deps/v8/test/cctest/test-macro-assembler-ia32.cc b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
index 4d37579918..b2b8c946c3 100644
--- a/deps/v8/test/cctest/test-macro-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
@@ -50,8 +50,6 @@ typedef F0Type* F0;
TEST(LoadAndStoreWithRepresentation) {
- v8::internal::V8::Initialize(NULL);
-
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 33a4611540..6cb00e4456 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -149,9 +149,9 @@ static void TestNaN(const char *code) {
i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
double value = a->get_scalar(0);
CHECK(std::isnan(value) &&
- i::BitCast<uint64_t>(value) ==
- i::BitCast<uint64_t>(
- i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ bit_cast<uint64_t>(value) ==
+ bit_cast<uint64_t>(
+ i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 2c0e918057..7f20a8dd4b 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -153,7 +153,6 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
// Test that we can move a Smi value literally into a register.
TEST(SmiMove) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -239,7 +238,6 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
// Test that we can compare smis for equality (and more).
TEST(SmiCompare) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -288,7 +286,6 @@ TEST(SmiCompare) {
TEST(Integer32ToSmi) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -415,7 +412,6 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
TEST(Integer64PlusConstantToSmi) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -458,7 +454,6 @@ TEST(Integer64PlusConstantToSmi) {
TEST(SmiCheck) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -704,7 +699,6 @@ void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiNeg) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -918,7 +912,6 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
TEST(SmiAdd) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1137,7 +1130,6 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
TEST(SmiSub) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1226,7 +1218,6 @@ void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiMul) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1330,7 +1321,6 @@ void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiDiv) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1438,7 +1428,6 @@ void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiMod) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1533,7 +1522,6 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiIndex) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1600,7 +1588,6 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiSelectNonSmi) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1677,7 +1664,6 @@ void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiAnd) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1756,7 +1742,6 @@ void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiOr) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1837,7 +1822,6 @@ void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiXor) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1902,7 +1886,6 @@ void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiNot) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -1996,7 +1979,6 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiShiftLeft) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -2100,7 +2082,6 @@ void TestSmiShiftLogicalRight(MacroAssembler* masm,
TEST(SmiShiftLogicalRight) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -2167,7 +2148,6 @@ void TestSmiShiftArithmeticRight(MacroAssembler* masm,
TEST(SmiShiftArithmeticRight) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -2229,7 +2209,6 @@ void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
- i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
@@ -2267,7 +2246,6 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
TEST(OperandOffset) {
- i::V8::Initialize(NULL);
uint32_t data[256];
for (uint32_t i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
@@ -2621,8 +2599,6 @@ TEST(OperandOffset) {
TEST(LoadAndStoreWithRepresentation) {
- v8::internal::V8::Initialize(NULL);
-
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
diff --git a/deps/v8/test/cctest/test-macro-assembler-x87.cc b/deps/v8/test/cctest/test-macro-assembler-x87.cc
index 9aa40c0b10..0b057d818f 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x87.cc
@@ -50,8 +50,6 @@ typedef F0Type* F0;
TEST(LoadAndStoreWithRepresentation) {
- v8::internal::V8::Initialize(NULL);
-
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 1d4b0d8e7d..c7d65310a4 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -139,13 +139,13 @@ TEST(MarkCompactCollector) {
heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 1");
// keep allocating garbage in new space until it fails
- const int ARRAY_SIZE = 100;
+ const int arraysize = 100;
AllocationResult allocation;
do {
- allocation = heap->AllocateFixedArray(ARRAY_SIZE);
+ allocation = heap->AllocateFixedArray(arraysize);
} while (!allocation.IsRetry());
heap->CollectGarbage(NEW_SPACE, "trigger 2");
- heap->AllocateFixedArray(ARRAY_SIZE).ToObjectChecked();
+ heap->AllocateFixedArray(arraysize).ToObjectChecked();
// keep allocating maps until it fails
do {
@@ -301,15 +301,13 @@ TEST(ObjectGroups) {
{
Object** g1_objects[] = { g1s1.location(), g1s2.location() };
- Object** g1_children[] = { g1c1.location() };
Object** g2_objects[] = { g2s1.location(), g2s2.location() };
- Object** g2_children[] = { g2c1.location() };
global_handles->AddObjectGroup(g1_objects, 2, NULL);
- global_handles->AddImplicitReferences(
- Handle<HeapObject>::cast(g1s1).location(), g1_children, 1);
+ global_handles->SetReference(Handle<HeapObject>::cast(g1s1).location(),
+ g1c1.location());
global_handles->AddObjectGroup(g2_objects, 2, NULL);
- global_handles->AddImplicitReferences(
- Handle<HeapObject>::cast(g2s1).location(), g2_children, 1);
+ global_handles->SetReference(Handle<HeapObject>::cast(g2s1).location(),
+ g2c1.location());
}
// Do a full GC
heap->CollectGarbage(OLD_POINTER_SPACE);
@@ -330,15 +328,13 @@ TEST(ObjectGroups) {
// Groups are deleted, rebuild groups.
{
Object** g1_objects[] = { g1s1.location(), g1s2.location() };
- Object** g1_children[] = { g1c1.location() };
Object** g2_objects[] = { g2s1.location(), g2s2.location() };
- Object** g2_children[] = { g2c1.location() };
global_handles->AddObjectGroup(g1_objects, 2, NULL);
- global_handles->AddImplicitReferences(
- Handle<HeapObject>::cast(g1s1).location(), g1_children, 1);
+ global_handles->SetReference(Handle<HeapObject>::cast(g1s1).location(),
+ g1c1.location());
global_handles->AddObjectGroup(g2_objects, 2, NULL);
- global_handles->AddImplicitReferences(
- Handle<HeapObject>::cast(g2s1).location(), g2_children, 1);
+ global_handles->SetReference(Handle<HeapObject>::cast(g2s1).location(),
+ g2c1.location());
}
heap->CollectGarbage(OLD_POINTER_SPACE);
@@ -389,15 +385,9 @@ TEST(EmptyObjectGroups) {
v8::HandleScope handle_scope(CcTest::isolate());
- Handle<Object> object = global_handles->Create(
- CcTest::test_heap()->AllocateFixedArray(1).ToObjectChecked());
-
TestRetainedObjectInfo info;
global_handles->AddObjectGroup(NULL, 0, &info);
DCHECK(info.has_been_disposed());
-
- global_handles->AddImplicitReferences(
- Handle<HeapObject>::cast(object).location(), NULL, 0);
}
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 679569e27c..d208a26922 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -253,7 +253,7 @@ static void ExpectRecords(v8::Isolate* isolate,
#define EXPECT_RECORDS(records, expectations) \
ExpectRecords(CcTest::isolate(), records, expectations, \
- ARRAY_SIZE(expectations))
+ arraysize(expectations))
TEST(APITestBasicMutation) {
v8::Isolate* v8_isolate = CcTest::isolate();
diff --git a/deps/v8/test/cctest/test-ordered-hash-table.cc b/deps/v8/test/cctest/test-ordered-hash-table.cc
index bb1e0145b5..9578936317 100644
--- a/deps/v8/test/cctest/test-ordered-hash-table.cc
+++ b/deps/v8/test/cctest/test-ordered-hash-table.cc
@@ -118,7 +118,8 @@ TEST(Map) {
CHECK(ordered_map->Lookup(obj)->IsTheHole());
ordered_map = OrderedHashMap::Put(ordered_map, obj, val);
CHECK_EQ(1, ordered_map->NumberOfElements());
- CHECK(ordered_map->Lookup(obj)->SameValue(*val));
+ Object* lookup = ordered_map->Lookup(obj);
+ CHECK(lookup->SameValue(*val));
bool was_present = false;
ordered_map = OrderedHashMap::Remove(ordered_map, obj, &was_present);
CHECK(was_present);
@@ -136,20 +137,28 @@ TEST(Map) {
ordered_map = OrderedHashMap::Put(ordered_map, obj2, val2);
ordered_map = OrderedHashMap::Put(ordered_map, obj3, val3);
CHECK_EQ(3, ordered_map->NumberOfElements());
- CHECK(ordered_map->Lookup(obj1)->SameValue(*val1));
- CHECK(ordered_map->Lookup(obj2)->SameValue(*val2));
- CHECK(ordered_map->Lookup(obj3)->SameValue(*val3));
+ lookup = ordered_map->Lookup(obj1);
+ CHECK(lookup->SameValue(*val1));
+ lookup = ordered_map->Lookup(obj2);
+ CHECK(lookup->SameValue(*val2));
+ lookup = ordered_map->Lookup(obj3);
+ CHECK(lookup->SameValue(*val3));
// Test growth
ordered_map = OrderedHashMap::Put(ordered_map, obj, val);
Handle<JSObject> obj4 = factory->NewJSObjectFromMap(map);
Handle<JSObject> val4 = factory->NewJSObjectFromMap(map);
ordered_map = OrderedHashMap::Put(ordered_map, obj4, val4);
- CHECK(ordered_map->Lookup(obj)->SameValue(*val));
- CHECK(ordered_map->Lookup(obj1)->SameValue(*val1));
- CHECK(ordered_map->Lookup(obj2)->SameValue(*val2));
- CHECK(ordered_map->Lookup(obj3)->SameValue(*val3));
- CHECK(ordered_map->Lookup(obj4)->SameValue(*val4));
+ lookup = ordered_map->Lookup(obj);
+ CHECK(lookup->SameValue(*val));
+ lookup = ordered_map->Lookup(obj1);
+ CHECK(lookup->SameValue(*val1));
+ lookup = ordered_map->Lookup(obj2);
+ CHECK(lookup->SameValue(*val2));
+ lookup = ordered_map->Lookup(obj3);
+ CHECK(lookup->SameValue(*val3));
+ lookup = ordered_map->Lookup(obj4);
+ CHECK(lookup->SameValue(*val4));
CHECK_EQ(5, ordered_map->NumberOfElements());
CHECK_EQ(4, ordered_map->NumberOfBuckets());
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 9cb5d69e6c..72f2298042 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -72,6 +72,7 @@ TEST(ScanKeywords) {
// The scanner should parse Harmony keywords for this test.
scanner.SetHarmonyScoping(true);
scanner.SetHarmonyModules(true);
+ scanner.SetHarmonyClasses(true);
scanner.Initialize(&stream);
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -86,7 +87,7 @@ TEST(ScanKeywords) {
}
// Adding characters will make keyword matching fail.
static const char chars_to_append[] = { 'z', '0', '_' };
- for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
+ for (int j = 0; j < static_cast<int>(arraysize(chars_to_append)); ++j) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
@@ -144,8 +145,8 @@ TEST(ScanHTMLEndComments) {
};
// Parser/Scanner needs a stack limit.
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; tests[i]; i++) {
const i::byte* source =
@@ -178,7 +179,7 @@ TEST(ScanHTMLEndComments) {
}
-class ScriptResource : public v8::String::ExternalAsciiStringResource {
+class ScriptResource : public v8::String::ExternalOneByteStringResource {
public:
ScriptResource(const char* data, size_t length)
: data_(data), length_(length) { }
@@ -197,8 +198,8 @@ TEST(UsingCachedData) {
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
// Source containing functions that might be lazily compiled and all types
// of symbols (string, propertyName, regexp).
@@ -250,8 +251,8 @@ TEST(PreparseFunctionDataIsUsed) {
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
const char* good_code[] = {
"function this_is_lazy() { var a; } function foo() { return 25; } foo();",
@@ -264,7 +265,7 @@ TEST(PreparseFunctionDataIsUsed) {
"var this_is_lazy = () => { if ( }; var foo = () => 25; foo();",
};
- for (unsigned i = 0; i < ARRAY_SIZE(good_code); i++) {
+ for (unsigned i = 0; i < arraysize(good_code); i++) {
v8::ScriptCompiler::Source good_source(v8_str(good_code[i]));
v8::ScriptCompiler::Compile(isolate, &good_source,
v8::ScriptCompiler::kProduceDataToCache);
@@ -291,8 +292,8 @@ TEST(PreparseFunctionDataIsUsed) {
TEST(StandAlonePreParser) {
v8::V8::Initialize();
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
const char* programs[] = {
"{label: 42}",
@@ -328,8 +329,8 @@ TEST(StandAlonePreParser) {
TEST(StandAlonePreParserNoNatives) {
v8::V8::Initialize();
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
const char* programs[] = {
"%ArgleBargle(glop);",
@@ -364,8 +365,8 @@ TEST(PreparsingObjectLiterals) {
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
{
const char* source = "var myo = {if: \"foo\"}; myo.if;";
@@ -397,7 +398,8 @@ TEST(RegressChromium62639) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
- isolate->stack_guard()->SetStackLimit(GetCurrentStackPosition() - 128 * 1024);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
const char* program = "var x = 'something';\n"
"escape: function() {}";
@@ -431,7 +433,8 @@ TEST(Regress928) {
// as with-content, which made it assume that a function inside
// the block could be lazily compiled, and an extra, unexpected,
// entry was added to the data.
- isolate->stack_guard()->SetStackLimit(GetCurrentStackPosition() - 128 * 1024);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
const char* program =
"try { } catch (e) { var foo = function () { /* first */ } }"
@@ -474,8 +477,8 @@ TEST(Regress928) {
TEST(PreParseOverflow) {
v8::V8::Initialize();
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
size_t kProgramSize = 1024 * 1024;
i::SmartArrayPointer<char> program(i::NewArray<char>(kProgramSize + 1));
@@ -521,10 +524,8 @@ class TestExternalResource: public v8::String::ExternalStringResource {
#define CHECK_EQU(v1, v2) CHECK_EQ(static_cast<int>(v1), static_cast<int>(v2))
-void TestCharacterStream(const char* ascii_source,
- unsigned length,
- unsigned start = 0,
- unsigned end = 0) {
+void TestCharacterStream(const char* one_byte_source, unsigned length,
+ unsigned start = 0, unsigned end = 0) {
if (end == 0) end = length;
unsigned sub_length = end - start;
i::Isolate* isolate = CcTest::i_isolate();
@@ -532,20 +533,22 @@ void TestCharacterStream(const char* ascii_source,
i::HandleScope test_scope(isolate);
i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
for (unsigned i = 0; i < length; i++) {
- uc16_buffer[i] = static_cast<i::uc16>(ascii_source[i]);
+ uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
}
- i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
- i::Handle<i::String> ascii_string =
- factory->NewStringFromAscii(ascii_vector).ToHandleChecked();
+ i::Vector<const char> one_byte_vector(one_byte_source,
+ static_cast<int>(length));
+ i::Handle<i::String> one_byte_string =
+ factory->NewStringFromAscii(one_byte_vector).ToHandleChecked();
TestExternalResource resource(uc16_buffer.get(), length);
i::Handle<i::String> uc16_string(
factory->NewExternalStringFromTwoByte(&resource).ToHandleChecked());
i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
- i::GenericStringUtf16CharacterStream string_stream(ascii_string, start, end);
+ i::GenericStringUtf16CharacterStream string_stream(one_byte_string, start,
+ end);
i::Utf8ToUtf16CharacterStream utf8_stream(
- reinterpret_cast<const i::byte*>(ascii_source), end);
+ reinterpret_cast<const i::byte*>(one_byte_source), end);
utf8_stream.SeekForward(start);
unsigned i = start;
@@ -554,7 +557,7 @@ void TestCharacterStream(const char* ascii_source,
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
- int32_t c0 = ascii_source[i];
+ int32_t c0 = one_byte_source[i];
int32_t c1 = uc16_stream.Advance();
int32_t c2 = string_stream.Advance();
int32_t c3 = utf8_stream.Advance();
@@ -568,7 +571,7 @@ void TestCharacterStream(const char* ascii_source,
}
while (i > start + sub_length / 4) {
// Pushback, re-read, pushback again.
- int32_t c0 = ascii_source[i - 1];
+ int32_t c0 = one_byte_source[i - 1];
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
@@ -611,7 +614,7 @@ void TestCharacterStream(const char* ascii_source,
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
- int32_t c0 = ascii_source[i];
+ int32_t c0 = one_byte_source[i];
int32_t c1 = uc16_stream.Advance();
int32_t c2 = string_stream.Advance();
int32_t c3 = utf8_stream.Advance();
@@ -807,7 +810,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
scanner.CurrentSymbol(&ast_value_factory)->string();
i::DisallowHeapAllocation no_alloc;
i::String::FlatContent content = val->GetFlatContent();
- CHECK(content.IsAscii());
+ CHECK(content.IsOneByte());
i::Vector<const uint8_t> actual = content.ToOneByteVector();
for (int i = 0; i < actual.length(); i++) {
CHECK_NE('\0', expected[i]);
@@ -1104,7 +1107,8 @@ TEST(ScopePositions) {
v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
- isolate->stack_guard()->SetStackLimit(GetCurrentStackPosition() - 128 * 1024);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
for (int i = 0; source_data[i].outer_prefix; i++) {
int kPrefixLen = Utf8LengthHelper(source_data[i].outer_prefix);
@@ -1127,7 +1131,10 @@ TEST(ScopePositions) {
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser parser(&info);
+ i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(),
+ isolate->unicode_cache()};
+ i::Parser parser(&info, &parse_info);
parser.set_allow_lazy(true);
parser.set_allow_harmony_scoping(true);
parser.set_allow_arrow_functions(true);
@@ -1207,9 +1214,10 @@ enum ParserFlag {
kAllowNativesSyntax,
kAllowHarmonyScoping,
kAllowModules,
- kAllowGenerators,
kAllowHarmonyNumericLiterals,
- kAllowArrowFunctions
+ kAllowArrowFunctions,
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
};
@@ -1226,10 +1234,12 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
parser->set_allow_modules(flags.Contains(kAllowModules));
- parser->set_allow_generators(flags.Contains(kAllowGenerators));
parser->set_allow_harmony_numeric_literals(
flags.Contains(kAllowHarmonyNumericLiterals));
+ parser->set_allow_harmony_object_literals(
+ flags.Contains(kAllowHarmonyObjectLiterals));
parser->set_allow_arrow_functions(flags.Contains(kAllowArrowFunctions));
+ parser->set_allow_classes(flags.Contains(kAllowClasses));
}
@@ -1260,7 +1270,10 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
{
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser parser(&info);
+ i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(),
+ isolate->unicode_cache()};
+ i::Parser parser(&info, &parse_info);
SetParserFlags(&parser, flags);
info.MarkAsGlobal();
parser.Parse();
@@ -1431,12 +1444,19 @@ TEST(ParserSync) {
v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
+
+ static const ParserFlag flags1[] = {
+ kAllowArrowFunctions,
+ kAllowClasses,
+ kAllowHarmonyNumericLiterals,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonyScoping,
+ kAllowLazy,
+ kAllowModules,
+ };
- static const ParserFlag flags1[] = {kAllowLazy, kAllowHarmonyScoping,
- kAllowModules, kAllowGenerators,
- kAllowArrowFunctions};
for (int i = 0; context_data[i][0] != NULL; ++i) {
for (int j = 0; statement_data[j] != NULL; ++j) {
for (int k = 0; termination_data[k] != NULL; ++k) {
@@ -1456,7 +1476,7 @@ TEST(ParserSync) {
termination_data[k],
context_data[i][1]);
CHECK(length == kProgramSize);
- TestParserSync(program.start(), flags1, ARRAY_SIZE(flags1));
+ TestParserSync(program.start(), flags1, arraysize(flags1));
}
}
}
@@ -1465,11 +1485,11 @@ TEST(ParserSync) {
// interaction with the flags above, so test these separately to reduce
// the combinatorial explosion.
static const ParserFlag flags2[] = { kAllowHarmonyNumericLiterals };
- TestParserSync("0o1234", flags2, ARRAY_SIZE(flags2));
- TestParserSync("0b1011", flags2, ARRAY_SIZE(flags2));
+ TestParserSync("0o1234", flags2, arraysize(flags2));
+ TestParserSync("0b1011", flags2, arraysize(flags2));
static const ParserFlag flags3[] = { kAllowNativesSyntax };
- TestParserSync("%DebugPrint(123)", flags3, ARRAY_SIZE(flags3));
+ TestParserSync("%DebugPrint(123)", flags3, arraysize(flags3));
}
@@ -1507,16 +1527,23 @@ void RunParserSyncTest(const char* context_data[][2],
v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
static const ParserFlag default_flags[] = {
- kAllowLazy, kAllowHarmonyScoping, kAllowModules,
- kAllowGenerators, kAllowNativesSyntax, kAllowArrowFunctions};
+ kAllowArrowFunctions,
+ kAllowClasses,
+ kAllowHarmonyNumericLiterals,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonyScoping,
+ kAllowLazy,
+ kAllowModules,
+ kAllowNativesSyntax,
+ };
ParserFlag* generated_flags = NULL;
if (flags == NULL) {
flags = default_flags;
- flags_len = ARRAY_SIZE(default_flags);
+ flags_len = arraysize(default_flags);
if (always_true_flags != NULL) {
// Remove always_true_flags from default_flags.
CHECK(always_true_flags_len < flags_len);
@@ -1666,7 +1693,7 @@ TEST(NoErrorsEvalAndArgumentsStrict) {
static const ParserFlag always_flags[] = {kAllowArrowFunctions};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, ARRAY_SIZE(always_flags));
+ always_flags, arraysize(always_flags));
}
@@ -1699,7 +1726,7 @@ TEST(ErrorsFutureStrictReservedWords) {
static const ParserFlag always_flags[] = {kAllowArrowFunctions};
RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- ARRAY_SIZE(always_flags));
+ arraysize(always_flags));
}
@@ -1728,7 +1755,7 @@ TEST(NoErrorsFutureStrictReservedWords) {
static const ParserFlag always_flags[] = {kAllowArrowFunctions};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, ARRAY_SIZE(always_flags));
+ always_flags, arraysize(always_flags));
}
@@ -1873,10 +1900,7 @@ TEST(NoErrorsYieldSloppyGeneratorsEnabled) {
NULL
};
- // This test requires kAllowGenerators to succeed.
- static const ParserFlag always_true_flags[] = { kAllowGenerators };
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_true_flags, 1);
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -1969,12 +1993,7 @@ TEST(NoErrorsGenerator) {
NULL
};
- // This test requires kAllowGenerators to succeed.
- static const ParserFlag always_true_flags[] = {
- kAllowGenerators
- };
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_true_flags, 1);
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2085,12 +2104,7 @@ TEST(NoErrorsNameOfStrictGenerator) {
NULL
};
- // This test requires kAllowGenerators to succeed.
- static const ParserFlag always_true_flags[] = {
- kAllowGenerators
- };
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_true_flags, 1);
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2153,7 +2167,7 @@ TEST(NoErrorsIllegalWordsAsLabels) {
static const ParserFlag always_flags[] = {kAllowArrowFunctions};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, ARRAY_SIZE(always_flags));
+ always_flags, arraysize(always_flags));
}
@@ -2241,8 +2255,8 @@ TEST(DontRegressPreParserDataSizes) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handles(isolate);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(GetCurrentStackPosition() -
- 128 * 1024);
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ i::GetCurrentStackPosition() - 128 * 1024);
struct TestCase {
const char* program;
@@ -2507,34 +2521,36 @@ TEST(ErrorsObjectLiteralChecking) {
};
const char* statement_data[] = {
- ",",
- "foo: 1, get foo() {}",
- "foo: 1, set foo(v) {}",
- "\"foo\": 1, get \"foo\"() {}",
- "\"foo\": 1, set \"foo\"(v) {}",
- "1: 1, get 1() {}",
- "1: 1, set 1() {}",
- // It's counter-intuitive, but these collide too (even in classic
- // mode). Note that we can have "foo" and foo as properties in classic mode,
- // but we cannot have "foo" and get foo, or foo and get "foo".
- "foo: 1, get \"foo\"() {}",
- "foo: 1, set \"foo\"(v) {}",
- "\"foo\": 1, get foo() {}",
- "\"foo\": 1, set foo(v) {}",
- "1: 1, get \"1\"() {}",
- "1: 1, set \"1\"() {}",
- "\"1\": 1, get 1() {}"
- "\"1\": 1, set 1(v) {}"
- // Wrong number of parameters
- "get bar(x) {}",
- "get bar(x, y) {}",
- "set bar() {}",
- "set bar(x, y) {}",
- // Parsing FunctionLiteral for getter or setter fails
- "get foo( +",
- "get foo() \"error\"",
- NULL
- };
+ ",",
+ "foo: 1, get foo() {}",
+ "foo: 1, set foo(v) {}",
+ "\"foo\": 1, get \"foo\"() {}",
+ "\"foo\": 1, set \"foo\"(v) {}",
+ "1: 1, get 1() {}",
+ "1: 1, set 1() {}",
+ "get foo() {}, get foo() {}",
+ "set foo(_) {}, set foo(_) {}",
+ // It's counter-intuitive, but these collide too (even in classic
+ // mode). Note that we can have "foo" and foo as properties in classic
+ // mode,
+ // but we cannot have "foo" and get foo, or foo and get "foo".
+ "foo: 1, get \"foo\"() {}",
+ "foo: 1, set \"foo\"(v) {}",
+ "\"foo\": 1, get foo() {}",
+ "\"foo\": 1, set foo(v) {}",
+ "1: 1, get \"1\"() {}",
+ "1: 1, set \"1\"() {}",
+ "\"1\": 1, get 1() {}"
+ "\"1\": 1, set 1(v) {}"
+ // Wrong number of parameters
+ "get bar(x) {}",
+ "get bar(x, y) {}",
+ "set bar() {}",
+ "set bar(x, y) {}",
+ // Parsing FunctionLiteral for getter or setter fails
+ "get foo( +",
+ "get foo() \"error\"",
+ NULL};
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2571,6 +2587,8 @@ TEST(NoErrorsObjectLiteralChecking) {
"\"foo\": 1, set \"bar\"(v) {}",
"1: 1, get 2() {}",
"1: 1, set 2(v) {}",
+ "get: 1, get foo() {}",
+ "set: 1, set foo(_) {}",
// Keywords, future reserved and strict future reserved are also allowed as
// property names.
"if: 4",
@@ -2793,7 +2811,7 @@ TEST(FuncNameInferrerTwoByte) {
"var obj1 = { oXj2 : { foo1: function() {} } }; "
"%FunctionGetInferredName(obj1.oXj2.foo1)");
uint16_t* two_byte_name = AsciiToTwoByteString("obj1.oXj2.foo1");
- // Make it really non-ASCII (replace the Xs with a non-ASCII character).
+ // Make it really non-Latin1 (replace the Xs with a non-Latin1 character).
two_byte_source[14] = two_byte_source[78] = two_byte_name[6] = 0x010d;
v8::Local<v8::String> source =
v8::String::NewFromTwoByte(isolate, two_byte_source);
@@ -3105,10 +3123,10 @@ TEST(InnerAssignment) {
int prefix_len = Utf8LengthHelper(prefix);
int midfix_len = Utf8LengthHelper(midfix);
int suffix_len = Utf8LengthHelper(suffix);
- for (unsigned i = 0; i < ARRAY_SIZE(outers); ++i) {
+ for (unsigned i = 0; i < arraysize(outers); ++i) {
const char* outer = outers[i].source;
int outer_len = Utf8LengthHelper(outer);
- for (unsigned j = 0; j < ARRAY_SIZE(inners); ++j) {
+ for (unsigned j = 0; j < arraysize(inners); ++j) {
for (unsigned outer_lazy = 0; outer_lazy < 2; ++outer_lazy) {
for (unsigned inner_lazy = 0; inner_lazy < 2; ++inner_lazy) {
if (outers[i].strict && inners[j].with) continue;
@@ -3132,7 +3150,10 @@ TEST(InnerAssignment) {
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser parser(&info);
+ i::Parser::ParseInfo parse_info = {
+ isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache()};
+ i::Parser parser(&info, &parse_info);
parser.set_allow_harmony_scoping(true);
CHECK(parser.Parse());
CHECK(i::Rewriter::Rewrite(&info));
@@ -3282,12 +3303,10 @@ TEST(ErrorsArrowFunctions) {
};
// The test is quite slow, so run it with a reduced set of flags.
- static const ParserFlag flags[] = {
- kAllowLazy, kAllowHarmonyScoping, kAllowGenerators
- };
+ static const ParserFlag flags[] = {kAllowLazy, kAllowHarmonyScoping};
static const ParserFlag always_flags[] = { kAllowArrowFunctions };
RunParserSyncTest(context_data, statement_data, kError, flags,
- ARRAY_SIZE(flags), always_flags, ARRAY_SIZE(always_flags));
+ arraysize(flags), always_flags, arraysize(always_flags));
}
@@ -3341,5 +3360,612 @@ TEST(NoErrorsArrowFunctions) {
static const ParserFlag always_flags[] = {kAllowArrowFunctions};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, ARRAY_SIZE(always_flags));
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(NoErrorsSuper) {
+ // Tests that parser and preparser accept 'super' keyword in right places.
+ const char* context_data[][2] = {{"", ";"},
+ {"k = ", ";"},
+ {"foo(", ");"},
+ {NULL, NULL}};
+
+ const char* statement_data[] = {
+ "super.x",
+ "super[27]",
+ "new super",
+ "new super()",
+ "new super(12, 45)",
+ "new new super",
+ "new new super()",
+ "new new super()()",
+ "z.super", // Ok, property lookup.
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowClasses};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ErrorsSuper) {
+ // Tests that parser and preparser generate same errors for 'super'.
+ const char* context_data[][2] = {{"", ";"},
+ {"k = ", ";"},
+ {"foo(", ");"},
+ {NULL, NULL}};
+
+ const char* statement_data[] = {
+ "super = x",
+ "y = super",
+ "f(super)",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowClasses};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(NoErrorsMethodDefinition) {
+ const char* context_data[][2] = {{"({", "});"},
+ {"'use strict'; ({", "});"},
+ {"({*", "});"},
+ {"'use strict'; ({*", "});"},
+ {NULL, NULL}};
+
+ const char* object_literal_body_data[] = {
+ "m() {}",
+ "m(x) { return x; }",
+ "m(x, y) {}, n() {}",
+ "set(x, y) {}",
+ "get(x, y) {}",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+ RunParserSyncTest(context_data, object_literal_body_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionNames) {
+ const char* context_data[][2] = {{"({", "(x, y) {}});"},
+ {"'use strict'; ({", "(x, y) {}});"},
+ {"({*", "(x, y) {}});"},
+ {"'use strict'; ({*", "(x, y) {}});"},
+ {NULL, NULL}};
+
+ const char* name_data[] = {
+ "m",
+ "'m'",
+ "\"m\"",
+ "\"m n\"",
+ "true",
+ "false",
+ "null",
+ "0",
+ "1.2",
+ "1e1",
+ "1E1",
+ "1e+1",
+ "1e-1",
+
+ // Keywords
+ "async",
+ "await",
+ "break",
+ "case",
+ "catch",
+ "class",
+ "const",
+ "continue",
+ "debugger",
+ "default",
+ "delete",
+ "do",
+ "else",
+ "enum",
+ "export",
+ "extends",
+ "finally",
+ "for",
+ "function",
+ "if",
+ "implements",
+ "import",
+ "in",
+ "instanceof",
+ "interface",
+ "let",
+ "new",
+ "package",
+ "private",
+ "protected",
+ "public",
+ "return",
+ "static",
+ "super",
+ "switch",
+ "this",
+ "throw",
+ "try",
+ "typeof",
+ "var",
+ "void",
+ "while",
+ "with",
+ "yield",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+ RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionStrictFormalParamereters) {
+ const char* context_data[][2] = {{"({method(", "){}});"},
+ {"'use strict'; ({method(", "){}});"},
+ {"({*method(", "){}});"},
+ {"'use strict'; ({*method(", "){}});"},
+ {NULL, NULL}};
+
+ const char* params_data[] = {
+ "x, x",
+ "x, y, x",
+ "eval",
+ "arguments",
+ "var",
+ "const",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+ RunParserSyncTest(context_data, params_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionDuplicateProperty) {
+ // Duplicate properties are allowed in ES6 but we haven't removed that check
+ // yet.
+ const char* context_data[][2] = {{"'use strict'; ({", "});"},
+ {NULL, NULL}};
+
+ const char* params_data[] = {
+ "x: 1, x() {}",
+ "x() {}, x: 1",
+ "x() {}, get x() {}",
+ "x() {}, set x(_) {}",
+ "x() {}, x() {}",
+ "x() {}, y() {}, x() {}",
+ "x() {}, \"x\"() {}",
+ "x() {}, 'x'() {}",
+ "0() {}, '0'() {}",
+ "1.0() {}, 1: 1",
+
+ "x: 1, *x() {}",
+ "*x() {}, x: 1",
+ "*x() {}, get x() {}",
+ "*x() {}, set x(_) {}",
+ "*x() {}, *x() {}",
+ "*x() {}, y() {}, *x() {}",
+ "*x() {}, *\"x\"() {}",
+ "*x() {}, *'x'() {}",
+ "*0() {}, *'0'() {}",
+ "*1.0() {}, 1: 1",
+
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+ RunParserSyncTest(context_data, params_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassExpressionNoErrors) {
+ const char* context_data[][2] = {{"(", ");"},
+ {"var C = ", ";"},
+ {"bar, ", ";"},
+ {NULL, NULL}};
+ const char* class_data[] = {
+ "class {}",
+ "class name {}",
+ "class extends F {}",
+ "class name extends F {}",
+ "class extends (F, G) {}",
+ "class name extends (F, G) {}",
+ "class extends class {} {}",
+ "class name extends class {} {}",
+ "class extends class base {} {}",
+ "class name extends class base {} {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowClasses};
+ RunParserSyncTest(context_data, class_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassDeclarationNoErrors) {
+ const char* context_data[][2] = {{"", ""},
+ {"{", "}"},
+ {"if (true) {", "}"},
+ {NULL, NULL}};
+ const char* statement_data[] = {
+ "class name {}",
+ "class name extends F {}",
+ "class name extends (F, G) {}",
+ "class name extends class {} {}",
+ "class name extends class base {} {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowClasses};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassBodyNoErrors) {
+ // Tests that parser and preparser accept valid class syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {NULL, NULL}};
+ const char* class_body_data[] = {
+ ";",
+ ";;",
+ "m() {}",
+ "m() {};",
+ "; m() {}",
+ "m() {}; n(x) {}",
+ "get x() {}",
+ "set x(v) {}",
+ "get() {}",
+ "set() {}",
+ "*g() {}",
+ "*g() {};",
+ "; *g() {}",
+ "*g() {}; *h(x) {}",
+ "static() {}",
+ "static m() {}",
+ "static get x() {}",
+ "static set x(v) {}",
+ "static get() {}",
+ "static set() {}",
+ "static static() {}",
+ "static get static() {}",
+ "static set static(v) {}",
+ "*static() {}",
+ "*get() {}",
+ "*set() {}",
+ "static *g() {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassPropertyNameNoErrors) {
+ const char* context_data[][2] = {{"(class {", "() {}});"},
+ {"(class { get ", "() {}});"},
+ {"(class { set ", "(v) {}});"},
+ {"(class { static ", "() {}});"},
+ {"(class { static get ", "() {}});"},
+ {"(class { static set ", "(v) {}});"},
+ {"(class { *", "() {}});"},
+ {"(class { static *", "() {}});"},
+ {"class C {", "() {}}"},
+ {"class C { get ", "() {}}"},
+ {"class C { set ", "(v) {}}"},
+ {"class C { static ", "() {}}"},
+ {"class C { static get ", "() {}}"},
+ {"class C { static set ", "(v) {}}"},
+ {"class C { *", "() {}}"},
+ {"class C { static *", "() {}}"},
+ {NULL, NULL}};
+ const char* name_data[] = {
+ "42",
+ "42.5",
+ "42e2",
+ "42e+2",
+ "42e-2",
+ "null",
+ "false",
+ "true",
+ "'str'",
+ "\"str\"",
+ "static",
+ "get",
+ "set",
+ "var",
+ "const",
+ "let",
+ "this",
+ "class",
+ "function",
+ "yield",
+ "if",
+ "else",
+ "for",
+ "while",
+ "do",
+ "try",
+ "catch",
+ "finally",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassExpressionErrors) {
+ const char* context_data[][2] = {{"(", ");"},
+ {"var C = ", ";"},
+ {"bar, ", ";"},
+ {NULL, NULL}};
+ const char* class_data[] = {
+ "class",
+ "class name",
+ "class name extends",
+ "class extends",
+ "class {",
+ "class { m }",
+ "class { m; n }",
+ "class { m: 1 }",
+ "class { m(); n() }",
+ "class { get m }",
+ "class { get m() }",
+ "class { get m() { }",
+ "class { set m() {} }", // Missing required parameter.
+ "class { m() {}, n() {} }", // No commas allowed.
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassDeclarationErrors) {
+ const char* context_data[][2] = {{"", ""},
+ {"{", "}"},
+ {"if (true) {", "}"},
+ {NULL, NULL}};
+ const char* class_data[] = {
+ "class",
+ "class name",
+ "class name extends",
+ "class extends",
+ "class name {",
+ "class name { m }",
+ "class name { m; n }",
+ "class name { m: 1 }",
+ "class name { m(); n() }",
+ "class name { get x }",
+ "class name { get x() }",
+ "class name { set x() {) }", // missing required param
+ "class {}", // Name is required for declaration
+ "class extends base {}",
+ "class name { *",
+ "class name { * }",
+ "class name { *; }",
+ "class name { *get x() {} }",
+ "class name { *set x(_) {} }",
+ "class name { *static m() {} }",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyNumericLiterals
+ };
+ RunParserSyncTest(context_data, class_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassNameErrors) {
+ const char* context_data[][2] = {{"class ", "{}"},
+ {"(class ", "{});"},
+ {"'use strict'; class ", "{}"},
+ {"'use strict'; (class ", "{});"},
+ {NULL, NULL}};
+ const char* class_name[] = {
+ "arguments",
+ "eval",
+ "implements",
+ "interface",
+ "let",
+ "package",
+ "private",
+ "protected",
+ "public",
+ "static",
+ "var",
+ "yield",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_name, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassGetterParamNameErrors) {
+ const char* context_data[][2] = {
+ {"class C { get name(", ") {} }"},
+ {"(class { get name(", ") {} });"},
+ {"'use strict'; class C { get name(", ") {} }"},
+ {"'use strict'; (class { get name(", ") {} })"},
+ {NULL, NULL}
+ };
+
+ const char* class_name[] = {
+ "arguments",
+ "eval",
+ "implements",
+ "interface",
+ "let",
+ "package",
+ "private",
+ "protected",
+ "public",
+ "static",
+ "var",
+ "yield",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_name, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassStaticPrototypeErrors) {
+ const char* context_data[][2] = {{"class C {", "}"},
+ {"(class {", "});"},
+ {NULL, NULL}};
+
+ const char* class_body_data[] = {
+ "static prototype() {}",
+ "static get prototype() {}",
+ "static set prototype(_) {}",
+ "static *prototype() {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassSpecialConstructorErrors) {
+ const char* context_data[][2] = {{"class C {", "}"},
+ {"(class {", "});"},
+ {NULL, NULL}};
+
+ const char* class_body_data[] = {
+ "get constructor() {}",
+ "get constructor(_) {}",
+ "*constructor() {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassConstructorNoErrors) {
+ const char* context_data[][2] = {{"class C {", "}"},
+ {"(class {", "});"},
+ {NULL, NULL}};
+
+ const char* class_body_data[] = {
+ "constructor() {}",
+ "static constructor() {}",
+ "static get constructor() {}",
+ "static set constructor(_) {}",
+ "static *constructor() {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassMultipleConstructorErrors) {
+ // We currently do not allow any duplicate properties in class bodies. This
+ // test ensures that when we change that we still throw on duplicate
+ // constructors.
+ const char* context_data[][2] = {{"class C {", "}"},
+ {"(class {", "});"},
+ {NULL, NULL}};
+
+ const char* class_body_data[] = {
+ "constructor() {}; constructor() {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+// TODO(arv): We should allow duplicate property names.
+// https://code.google.com/p/v8/issues/detail?id=3570
+DISABLED_TEST(ClassMultiplePropertyNamesNoErrors) {
+ const char* context_data[][2] = {{"class C {", "}"},
+ {"(class {", "});"},
+ {NULL, NULL}};
+
+ const char* class_body_data[] = {
+ "constructor() {}; static constructor() {}",
+ "m() {}; static m() {}",
+ "m() {}; m() {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassesAreStrictErrors) {
+ const char* context_data[][2] = {{"", ""},
+ {"(", ");"},
+ {NULL, NULL}};
+
+ const char* class_body_data[] = {
+ "class C { method() { with ({}) {} } }",
+ "class C extends function() { with ({}) {} } {}",
+ "class C { *method() { with ({}) {} } }",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowClasses,
+ kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 3beaccea8e..100a5a78cf 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -1,77 +1,35 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
-using namespace ::v8::internal;
+#ifdef V8_CC_GNU
-#ifdef __GNUC__
-#define ASM __asm__ __volatile__
+static uintptr_t sp_addr = 0;
-#if defined(_M_X64) || defined(__x86_64__)
-#define GET_STACK_POINTER() \
- static int sp_addr = 0; \
- do { \
- ASM("mov %%rsp, %0" : "=g" (sp_addr)); \
- } while (0)
-#elif defined(_M_IX86) || defined(__i386__)
-#define GET_STACK_POINTER() \
- static int sp_addr = 0; \
- do { \
- ASM("mov %%esp, %0" : "=g" (sp_addr)); \
- } while (0)
-#elif defined(__ARMEL__)
-#define GET_STACK_POINTER() \
- static int sp_addr = 0; \
- do { \
- ASM("str %%sp, %0" : "=g" (sp_addr)); \
- } while (0)
-#elif defined(__AARCH64EL__)
-#define GET_STACK_POINTER() \
- static int sp_addr = 0; \
- do { \
- ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
- } while (0)
-#elif defined(__MIPSEB__) || defined(__MIPSEL__)
-#define GET_STACK_POINTER() \
- static int sp_addr = 0; \
- do { \
- ASM("sw $sp, %0" : "=g" (sp_addr)); \
- } while (0)
+void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
+#if V8_HOST_ARCH_X64
+ __asm__ __volatile__("mov %%rsp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_IA32
+ __asm__ __volatile__("mov %%esp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_ARM
+ __asm__ __volatile__("str %%sp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_ARM64
+ __asm__ __volatile__("mov x16, sp; str x16, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_MIPS
+ __asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_MIPS64
+ __asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
#else
#error Host architecture was not detected as supported by v8
#endif
-void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
- GET_STACK_POINTER();
- args.GetReturnValue().Set(v8_num(sp_addr));
+ args.GetReturnValue().Set(v8::Integer::NewFromUnsigned(
+ args.GetIsolate(), static_cast<uint32_t>(sp_addr)));
}
@@ -94,9 +52,7 @@ TEST(StackAlignment) {
v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
v8::Local<v8::Value> result = foo->Call(global_object, 0, NULL);
- CHECK_EQ(0, result->Int32Value() % v8::base::OS::ActivationFrameAlignment());
+ CHECK_EQ(0, result->Uint32Value() % v8::base::OS::ActivationFrameAlignment());
}
-#undef GET_STACK_POINTERS
-#undef ASM
-#endif // __GNUC__
+#endif // V8_CC_GNU
diff --git a/deps/v8/test/cctest/test-random-number-generator.cc b/deps/v8/test/cctest/test-random-number-generator.cc
index a53205c9c8..04b58820a2 100644
--- a/deps/v8/test/cctest/test-random-number-generator.cc
+++ b/deps/v8/test/cctest/test-random-number-generator.cc
@@ -34,28 +34,16 @@
using namespace v8::internal;
-static const int kMaxRuns = 12345;
-static const int kRandomSeeds[] = {
- -1, 1, 42, 100, 1234567890, 987654321
-};
+static const int64_t kRandomSeeds[] = {-1, 1, 42, 100, 1234567890, 987654321};
TEST(RandomSeedFlagIsUsed) {
- for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
- FLAG_random_seed = kRandomSeeds[n];
+ for (unsigned n = 0; n < arraysize(kRandomSeeds); ++n) {
+ FLAG_random_seed = static_cast<int>(kRandomSeeds[n]);
v8::Isolate* i = v8::Isolate::New();
- v8::base::RandomNumberGenerator& rng1 =
+ v8::base::RandomNumberGenerator& rng =
*reinterpret_cast<Isolate*>(i)->random_number_generator();
- v8::base::RandomNumberGenerator rng2(kRandomSeeds[n]);
- for (int k = 1; k <= kMaxRuns; ++k) {
- int64_t i1, i2;
- rng1.NextBytes(&i1, sizeof(i1));
- rng2.NextBytes(&i2, sizeof(i2));
- CHECK_EQ(i2, i1);
- CHECK_EQ(rng2.NextInt(), rng1.NextInt());
- CHECK_EQ(rng2.NextInt(k), rng1.NextInt(k));
- CHECK_EQ(rng2.NextDouble(), rng1.NextDouble());
- }
+ CHECK_EQ(kRandomSeeds[n], rng.initial_seed());
i->Dispose();
}
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 5c1764eacf..9d1d52e675 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -85,7 +85,6 @@ using namespace v8::internal;
static bool CheckParse(const char* input) {
- V8::Initialize(NULL);
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -96,7 +95,6 @@ static bool CheckParse(const char* input) {
static void CheckParseEq(const char* input, const char* expected) {
- V8::Initialize(NULL);
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -112,7 +110,6 @@ static void CheckParseEq(const char* input, const char* expected) {
static bool CheckSimple(const char* input) {
- V8::Initialize(NULL);
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -131,7 +128,6 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
- V8::Initialize(NULL);
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -156,8 +152,6 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
}
TEST(Parser) {
- V8::Initialize(NULL);
-
CHECK_PARSE_ERROR("?");
CheckParseEq("abc", "'abc'");
@@ -407,7 +401,6 @@ TEST(ParserRegression) {
static void ExpectError(const char* input,
const char* expected) {
- V8::Initialize(NULL);
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -494,7 +487,6 @@ static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
TEST(CharacterClassEscapes) {
- v8::internal::V8::Initialize(NULL);
TestCharacterClassEscapes('.', IsRegExpNewline);
TestCharacterClassEscapes('d', IsDigit);
TestCharacterClassEscapes('D', NotDigit);
@@ -505,11 +497,8 @@ TEST(CharacterClassEscapes) {
}
-static RegExpNode* Compile(const char* input,
- bool multiline,
- bool is_ascii,
+static RegExpNode* Compile(const char* input, bool multiline, bool is_one_byte,
Zone* zone) {
- V8::Initialize(NULL);
Isolate* isolate = CcTest::i_isolate();
FlatStringReader reader(isolate, CStrVector(input));
RegExpCompileData compile_data;
@@ -520,25 +509,17 @@ static RegExpNode* Compile(const char* input,
NewStringFromUtf8(CStrVector(input)).ToHandleChecked();
Handle<String> sample_subject =
isolate->factory()->NewStringFromUtf8(CStrVector("")).ToHandleChecked();
- RegExpEngine::Compile(&compile_data,
- false,
- false,
- multiline,
- pattern,
- sample_subject,
- is_ascii,
- zone);
+ RegExpEngine::Compile(&compile_data, false, false, multiline, false, pattern,
+ sample_subject, is_one_byte, zone);
return compile_data.node;
}
-static void Execute(const char* input,
- bool multiline,
- bool is_ascii,
+static void Execute(const char* input, bool multiline, bool is_one_byte,
bool dot_output = false) {
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate());
- RegExpNode* node = Compile(input, multiline, is_ascii, &zone);
+ RegExpNode* node = Compile(input, multiline, is_one_byte, &zone);
USE(node);
#ifdef DEBUG
if (dot_output) {
@@ -574,7 +555,6 @@ static unsigned PseudoRandom(int i, int j) {
TEST(SplayTreeSimple) {
- v8::internal::V8::Initialize(NULL);
static const unsigned kLimit = 1000;
Zone zone(CcTest::i_isolate());
ZoneSplayTree<TestConfig> tree(&zone);
@@ -627,7 +607,6 @@ TEST(SplayTreeSimple) {
TEST(DispatchTableConstruction) {
- v8::internal::V8::Initialize(NULL);
// Initialize test data.
static const int kLimit = 1000;
static const int kRangeCount = 8;
@@ -756,16 +735,16 @@ TEST(MacroAssemblerNativeSuccess) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
m.Succeed();
- Handle<String> source = factory->NewStringFromStaticAscii("");
+ Handle<String> source = factory->NewStringFromStaticChars("");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
int captures[4] = {42, 37, 87, 117};
- Handle<String> input = factory->NewStringFromStaticAscii("foofoo");
+ Handle<String> input = factory->NewStringFromStaticChars("foofoo");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
const byte* start_adr =
reinterpret_cast<const byte*>(seq_input->GetCharsAddress());
@@ -793,7 +772,7 @@ TEST(MacroAssemblerNativeSimple) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
Label fail, backtrack;
m.PushBacktrack(&fail);
@@ -814,12 +793,12 @@ TEST(MacroAssemblerNativeSimple) {
m.Bind(&fail);
m.Fail();
- Handle<String> source = factory->NewStringFromStaticAscii("^foo");
+ Handle<String> source = factory->NewStringFromStaticChars("^foo");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
int captures[4] = {42, 37, 87, 117};
- Handle<String> input = factory->NewStringFromStaticAscii("foofoo");
+ Handle<String> input = factory->NewStringFromStaticChars("foofoo");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -837,7 +816,7 @@ TEST(MacroAssemblerNativeSimple) {
CHECK_EQ(-1, captures[2]);
CHECK_EQ(-1, captures[3]);
- input = factory->NewStringFromStaticAscii("barbarbar");
+ input = factory->NewStringFromStaticChars("barbarbar");
seq_input = Handle<SeqOneByteString>::cast(input);
start_adr = seq_input->GetCharsAddress();
@@ -880,7 +859,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
m.Bind(&fail);
m.Fail();
- Handle<String> source = factory->NewStringFromStaticAscii("^foo");
+ Handle<String> source = factory->NewStringFromStaticChars("^foo");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
@@ -931,7 +910,7 @@ TEST(MacroAssemblerNativeBacktrack) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
Label fail;
Label backtrack;
@@ -944,11 +923,11 @@ TEST(MacroAssemblerNativeBacktrack) {
m.Bind(&backtrack);
m.Fail();
- Handle<String> source = factory->NewStringFromStaticAscii("..........");
+ Handle<String> source = factory->NewStringFromStaticChars("..........");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input = factory->NewStringFromStaticAscii("foofoo");
+ Handle<String> input = factory->NewStringFromStaticChars("foofoo");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -964,14 +943,14 @@ TEST(MacroAssemblerNativeBacktrack) {
}
-TEST(MacroAssemblerNativeBackReferenceASCII) {
+TEST(MacroAssemblerNativeBackReferenceLATIN1) {
v8::V8::Initialize();
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -988,11 +967,11 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
m.Bind(&missing_match);
m.Fail();
- Handle<String> source = factory->NewStringFromStaticAscii("^(..)..\1");
+ Handle<String> source = factory->NewStringFromStaticChars("^(..)..\1");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input = factory->NewStringFromStaticAscii("fooofo");
+ Handle<String> input = factory->NewStringFromStaticChars("fooofo");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -1037,7 +1016,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
m.Bind(&missing_match);
m.Fail();
- Handle<String> source = factory->NewStringFromStaticAscii("^(..)..\1");
+ Handle<String> source = factory->NewStringFromStaticChars("^(..)..\1");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
@@ -1072,7 +1051,7 @@ TEST(MacroAssemblernativeAtStart) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
Label not_at_start, newline, fail;
m.CheckNotAtStart(&not_at_start);
@@ -1095,11 +1074,11 @@ TEST(MacroAssemblernativeAtStart) {
m.CheckNotCharacter('b', &fail);
m.Succeed();
- Handle<String> source = factory->NewStringFromStaticAscii("(^f|ob)");
+ Handle<String> source = factory->NewStringFromStaticChars("(^f|ob)");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input = factory->NewStringFromStaticAscii("foobar");
+ Handle<String> input = factory->NewStringFromStaticChars("foobar");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -1131,7 +1110,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
Label fail, succ;
@@ -1156,12 +1135,11 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
m.Succeed();
Handle<String> source =
- factory->NewStringFromStaticAscii("^(abc)\1\1(?!\1)...(?!\1)");
+ factory->NewStringFromStaticChars("^(abc)\1\1(?!\1)...(?!\1)");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
- Handle<String> input =
- factory->NewStringFromStaticAscii("aBcAbCABCxYzab");
+ Handle<String> input = factory->NewStringFromStaticChars("aBcAbCABCxYzab");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -1190,7 +1168,7 @@ TEST(MacroAssemblerNativeRegisters) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 6, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 6, &zone);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -1256,14 +1234,12 @@ TEST(MacroAssemblerNativeRegisters) {
m.Bind(&fail);
m.Fail();
- Handle<String> source =
- factory->NewStringFromStaticAscii("<loop test>");
+ Handle<String> source = factory->NewStringFromStaticChars("<loop test>");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
// String long enough for test (content doesn't matter).
- Handle<String> input =
- factory->NewStringFromStaticAscii("foofoofoofoofoo");
+ Handle<String> input = factory->NewStringFromStaticChars("foofoofoofoofoo");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -1293,7 +1269,7 @@ TEST(MacroAssemblerStackOverflow) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
Label loop;
m.Bind(&loop);
@@ -1301,13 +1277,12 @@ TEST(MacroAssemblerStackOverflow) {
m.GoTo(&loop);
Handle<String> source =
- factory->NewStringFromStaticAscii("<stack overflow test>");
+ factory->NewStringFromStaticChars("<stack overflow test>");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
// String long enough for test (content doesn't matter).
- Handle<String> input =
- factory->NewStringFromStaticAscii("dummy");
+ Handle<String> input = factory->NewStringFromStaticChars("dummy");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -1332,7 +1307,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
Factory* factory = isolate->factory();
Zone zone(isolate);
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 2, &zone);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 2, &zone);
// At least 2048, to ensure the allocated space for registers
// span one full page.
@@ -1348,13 +1323,12 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
m.Succeed();
Handle<String> source =
- factory->NewStringFromStaticAscii("<huge register space test>");
+ factory->NewStringFromStaticChars("<huge register space test>");
Handle<Object> code_object = m.GetCode(source);
Handle<Code> code = Handle<Code>::cast(code_object);
// String long enough for test (content doesn't matter).
- Handle<String> input =
- factory->NewStringFromStaticAscii("sample text");
+ Handle<String> input = factory->NewStringFromStaticChars("sample text");
Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -1377,7 +1351,6 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
#else // V8_INTERPRETED_REGEXP
TEST(MacroAssembler) {
- V8::Initialize(NULL);
byte codes[1024];
Zone zone(CcTest::i_isolate());
RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024), &zone);
@@ -1416,7 +1389,7 @@ TEST(MacroAssembler) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- Handle<String> source = factory->NewStringFromStaticAscii("^f(o)o");
+ Handle<String> source = factory->NewStringFromStaticChars("^f(o)o");
Handle<ByteArray> array = Handle<ByteArray>::cast(m.GetCode(source));
int captures[5];
@@ -1443,7 +1416,6 @@ TEST(MacroAssembler) {
TEST(AddInverseToTable) {
- v8::internal::V8::Initialize(NULL);
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
@@ -1603,7 +1575,6 @@ static void TestSimpleRangeCaseIndependence(CharacterRange input,
TEST(CharacterRangeCaseIndependence) {
- v8::internal::V8::Initialize(NULL);
TestSimpleRangeCaseIndependence(CharacterRange::Singleton('a'),
CharacterRange::Singleton('A'));
TestSimpleRangeCaseIndependence(CharacterRange::Singleton('z'),
@@ -1645,7 +1616,6 @@ static bool InClass(uc16 c, ZoneList<CharacterRange>* ranges) {
TEST(CharClassDifference) {
- v8::internal::V8::Initialize(NULL);
Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
@@ -1673,7 +1643,6 @@ TEST(CharClassDifference) {
TEST(CanonicalizeCharacterSets) {
- v8::internal::V8::Initialize(NULL);
Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(4, &zone);
@@ -1735,7 +1704,6 @@ TEST(CanonicalizeCharacterSets) {
TEST(CharacterRangeMerge) {
- v8::internal::V8::Initialize(NULL);
Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange> l1(4, &zone);
ZoneList<CharacterRange> l2(4, &zone);
@@ -1823,6 +1791,5 @@ TEST(CharacterRangeMerge) {
TEST(Graph) {
- V8::Initialize(NULL);
Execute("\\b\\w+\\b", false, true, true);
}
diff --git a/deps/v8/test/cctest/test-semaphore.cc b/deps/v8/test/cctest/test-semaphore.cc
deleted file mode 100644
index c7fca519dc..0000000000
--- a/deps/v8/test/cctest/test-semaphore.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/base/platform/platform.h"
-#include "test/cctest/cctest.h"
-
-
-using namespace ::v8::internal;
-
-
-class WaitAndSignalThread V8_FINAL : public v8::base::Thread {
- public:
- explicit WaitAndSignalThread(v8::base::Semaphore* semaphore)
- : Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
- virtual ~WaitAndSignalThread() {}
-
- virtual void Run() V8_OVERRIDE {
- for (int n = 0; n < 1000; ++n) {
- semaphore_->Wait();
- bool result =
- semaphore_->WaitFor(v8::base::TimeDelta::FromMicroseconds(1));
- DCHECK(!result);
- USE(result);
- semaphore_->Signal();
- }
- }
-
- private:
- v8::base::Semaphore* semaphore_;
-};
-
-
-TEST(WaitAndSignal) {
- v8::base::Semaphore semaphore(0);
- WaitAndSignalThread t1(&semaphore);
- WaitAndSignalThread t2(&semaphore);
-
- t1.Start();
- t2.Start();
-
- // Make something available.
- semaphore.Signal();
-
- t1.Join();
- t2.Join();
-
- semaphore.Wait();
-
- bool result = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(1));
- DCHECK(!result);
- USE(result);
-}
-
-
-TEST(WaitFor) {
- bool ok;
- v8::base::Semaphore semaphore(0);
-
- // Semaphore not signalled - timeout.
- ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(0));
- CHECK(!ok);
- ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(100));
- CHECK(!ok);
- ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(1000));
- CHECK(!ok);
-
- // Semaphore signalled - no timeout.
- semaphore.Signal();
- ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(0));
- CHECK(ok);
- semaphore.Signal();
- ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(100));
- CHECK(ok);
- semaphore.Signal();
- ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(1000));
- CHECK(ok);
-}
-
-
-static const char alphabet[] = "XKOAD";
-static const int kAlphabetSize = sizeof(alphabet) - 1;
-static const int kBufferSize = 4096; // GCD(buffer size, alphabet size) = 1
-static char buffer[kBufferSize];
-static const int kDataSize = kBufferSize * kAlphabetSize * 10;
-
-static v8::base::Semaphore free_space(kBufferSize);
-static v8::base::Semaphore used_space(0);
-
-
-class ProducerThread V8_FINAL : public v8::base::Thread {
- public:
- ProducerThread() : Thread(Options("ProducerThread")) {}
- virtual ~ProducerThread() {}
-
- virtual void Run() V8_OVERRIDE {
- for (int n = 0; n < kDataSize; ++n) {
- free_space.Wait();
- buffer[n % kBufferSize] = alphabet[n % kAlphabetSize];
- used_space.Signal();
- }
- }
-};
-
-
-class ConsumerThread V8_FINAL : public v8::base::Thread {
- public:
- ConsumerThread() : Thread(Options("ConsumerThread")) {}
- virtual ~ConsumerThread() {}
-
- virtual void Run() V8_OVERRIDE {
- for (int n = 0; n < kDataSize; ++n) {
- used_space.Wait();
- DCHECK_EQ(static_cast<int>(alphabet[n % kAlphabetSize]),
- static_cast<int>(buffer[n % kBufferSize]));
- free_space.Signal();
- }
- }
-};
-
-
-TEST(ProducerConsumer) {
- ProducerThread producer_thread;
- ConsumerThread consumer_thread;
- producer_thread.Start();
- consumer_thread.Start();
- producer_thread.Join();
- consumer_thread.Join();
-}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 9ae90c4776..ed9419dfe0 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -35,10 +35,9 @@
#include "src/compilation-cache.h"
#include "src/debug.h"
#include "src/heap/spaces.h"
-#include "src/ic-inl.h"
#include "src/natives.h"
#include "src/objects.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/scopeinfo.h"
#include "src/serialize.h"
#include "src/snapshot.h"
@@ -46,42 +45,6 @@
using namespace v8::internal;
-static const unsigned kCounters = 256;
-static int local_counters[kCounters];
-static const char* local_counter_names[kCounters];
-
-
-static unsigned CounterHash(const char* s) {
- unsigned hash = 0;
- while (*++s) {
- hash |= hash << 5;
- hash += *s;
- }
- return hash;
-}
-
-
-// Callback receiver to track counters in test.
-static int* counter_function(const char* name) {
- unsigned hash = CounterHash(name) % kCounters;
- unsigned original_hash = hash;
- USE(original_hash);
- while (true) {
- if (local_counter_names[hash] == name) {
- return &local_counters[hash];
- }
- if (local_counter_names[hash] == 0) {
- local_counter_names[hash] = name;
- return &local_counters[hash];
- }
- if (strcmp(local_counter_names[hash], name) == 0) {
- return &local_counters[hash];
- }
- hash = (hash + 1) % kCounters;
- DCHECK(hash != original_hash); // Hash table has been filled up.
- }
-}
-
template <class T>
static Address AddressOf(T id) {
@@ -102,7 +65,6 @@ static int make_code(TypeCode type, int id) {
TEST(ExternalReferenceEncoder) {
Isolate* isolate = CcTest::i_isolate();
- isolate->stats_table()->SetCounterFunction(counter_function);
v8::V8::Initialize();
ExternalReferenceEncoder encoder(isolate);
@@ -110,10 +72,6 @@ TEST(ExternalReferenceEncoder) {
Encode(encoder, Builtins::kArrayCode));
CHECK_EQ(make_code(v8::internal::RUNTIME_FUNCTION, Runtime::kAbort),
Encode(encoder, Runtime::kAbort));
- ExternalReference total_compile_size =
- ExternalReference(isolate->counters()->total_compile_size());
- CHECK_EQ(make_code(STATS_COUNTER, Counters::k_total_compile_size),
- encoder.Encode(total_compile_size.address()));
ExternalReference stack_limit_address =
ExternalReference::address_of_stack_limit(isolate);
CHECK_EQ(make_code(UNCLASSIFIED, 2),
@@ -137,7 +95,6 @@ TEST(ExternalReferenceEncoder) {
TEST(ExternalReferenceDecoder) {
Isolate* isolate = CcTest::i_isolate();
- isolate->stats_table()->SetCounterFunction(counter_function);
v8::V8::Initialize();
ExternalReferenceDecoder decoder(isolate);
@@ -146,12 +103,6 @@ TEST(ExternalReferenceDecoder) {
CHECK_EQ(AddressOf(Runtime::kAbort),
decoder.Decode(make_code(v8::internal::RUNTIME_FUNCTION,
Runtime::kAbort)));
- ExternalReference total_compile_size =
- ExternalReference(isolate->counters()->total_compile_size());
- CHECK_EQ(total_compile_size.address(),
- decoder.Decode(
- make_code(STATS_COUNTER,
- Counters::k_total_compile_size)));
CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
decoder.Decode(make_code(UNCLASSIFIED, 2)));
CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
@@ -186,14 +137,10 @@ class FileByteSink : public SnapshotByteSink {
virtual int Position() {
return ftell(fp_);
}
- void WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int property_cell_space_used);
+ void WriteSpaceUsed(int new_space_used, int pointer_space_used,
+ int data_space_used, int code_space_used,
+ int map_space_used, int cell_space_used,
+ int property_cell_space_used, int lo_space_used);
private:
FILE* fp_;
@@ -201,14 +148,11 @@ class FileByteSink : public SnapshotByteSink {
};
-void FileByteSink::WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int property_cell_space_used) {
+void FileByteSink::WriteSpaceUsed(int new_space_used, int pointer_space_used,
+ int data_space_used, int code_space_used,
+ int map_space_used, int cell_space_used,
+ int property_cell_space_used,
+ int lo_space_used) {
int file_name_length = StrLength(file_name_) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
SNPrintF(name, "%s.size", file_name_);
@@ -221,6 +165,7 @@ void FileByteSink::WriteSpaceUsed(
fprintf(fp, "map %d\n", map_space_used);
fprintf(fp, "cell %d\n", cell_space_used);
fprintf(fp, "property cell %d\n", property_cell_space_used);
+ fprintf(fp, "lo %d\n", lo_space_used);
fclose(fp);
}
@@ -230,53 +175,55 @@ static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
StartupSerializer ser(isolate, &file);
ser.Serialize();
- file.WriteSpaceUsed(
- ser.CurrentAllocationAddress(NEW_SPACE),
- ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- ser.CurrentAllocationAddress(CODE_SPACE),
- ser.CurrentAllocationAddress(MAP_SPACE),
- ser.CurrentAllocationAddress(CELL_SPACE),
- ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
+ file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
+ ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ ser.CurrentAllocationAddress(CODE_SPACE),
+ ser.CurrentAllocationAddress(MAP_SPACE),
+ ser.CurrentAllocationAddress(CELL_SPACE),
+ ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
+ ser.CurrentAllocationAddress(LO_SPACE));
return true;
}
-static void Serialize() {
+static void Serialize(v8::Isolate* isolate) {
// We have to create one context. One reason for this is so that the builtins
// can be loaded from v8natives.js and their addresses can be processed. This
// will clear the pending fixups array, which would otherwise contain GC roots
// that would confuse the serialization/deserialization process.
- v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
{
v8::HandleScope scope(isolate);
v8::Context::New(isolate);
}
- Isolate* internal_isolate = CcTest::i_isolate();
+ Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
internal_isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "serialize");
WriteToFile(internal_isolate, FLAG_testing_serialization_file);
}
// Test that the whole heap can be serialized.
-TEST(Serialize) {
+UNINITIALIZED_TEST(Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- CcTest::i_isolate()->enable_serializer();
- v8::V8::Initialize();
- Serialize();
+ v8::Isolate::CreateParams params;
+ params.enable_serializer = true;
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ Serialize(isolate);
}
}
// Test that heap serialization is non-destructive.
-TEST(SerializeTwice) {
+UNINITIALIZED_TEST(SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- CcTest::i_isolate()->enable_serializer();
- v8::V8::Initialize();
- Serialize();
- Serialize();
+ v8::Isolate::CreateParams params;
+ params.enable_serializer = true;
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ Serialize(isolate);
+ Serialize(isolate);
}
}
@@ -293,7 +240,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
FILE* fp = v8::base::OS::FOpen(name.start(), "r");
name.Dispose();
int new_size, pointer_size, data_size, code_size, map_size, cell_size,
- property_cell_size;
+ property_cell_size, lo_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
@@ -306,6 +253,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size));
+ CHECK_EQ(1, fscanf(fp, "lo %d\n", &lo_size));
#ifdef _MSC_VER
#undef fscanf
#endif
@@ -317,333 +265,378 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_size);
+ deserializer->set_reservation(LO_SPACE, lo_size);
}
-bool InitializeFromFile(const char* snapshot_file) {
+v8::Isolate* InitializeFromFile(const char* snapshot_file) {
int len;
byte* str = ReadBytes(snapshot_file, &len);
- if (!str) return false;
- bool success;
+ if (!str) return NULL;
+ v8::Isolate* v8_isolate = NULL;
{
SnapshotByteSource source(str, len);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, snapshot_file);
- success = V8::Initialize(&deserializer);
+ Isolate* isolate = Isolate::NewForTesting();
+ v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+ isolate->Init(&deserializer);
}
DeleteArray(str);
- return success;
+ return v8_isolate;
}
-static void Deserialize() {
- CHECK(InitializeFromFile(FLAG_testing_serialization_file));
+static v8::Isolate* Deserialize() {
+ v8::Isolate* isolate = InitializeFromFile(FLAG_testing_serialization_file);
+ CHECK(isolate);
+ return isolate;
}
-static void SanityCheck() {
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
+static void SanityCheck(v8::Isolate* v8_isolate) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ v8::HandleScope scope(v8_isolate);
#ifdef VERIFY_HEAP
- CcTest::heap()->Verify();
+ isolate->heap()->Verify();
#endif
CHECK(isolate->global_object()->IsJSObject());
CHECK(isolate->native_context()->IsContext());
- CHECK(CcTest::heap()->string_table()->IsStringTable());
- isolate->factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
+ CHECK(isolate->heap()->string_table()->IsStringTable());
+ isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
}
-DEPENDENT_TEST(Deserialize, Serialize) {
+UNINITIALIZED_DEPENDENT_TEST(Deserialize, Serialize) {
// The serialize-deserialize tests only work if the VM is built without
// serialization. That doesn't matter. We don't need to be able to
// serialize a snapshot in a VM that is booted from a snapshot.
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Deserialize();
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- SanityCheck();
+ SanityCheck(isolate);
+ }
+ isolate->Dispose();
}
}
-DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
+UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerialization,
+ SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Deserialize();
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- SanityCheck();
+ SanityCheck(isolate);
+ }
+ isolate->Dispose();
}
}
-DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
+UNINITIALIZED_DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Deserialize();
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
- v8::Local<v8::Script> script = v8::Script::Compile(source);
- CHECK_EQ(4, script->Run()->Int32Value());
+ const char* c_source = "\"1234\".length";
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
+ v8::Local<v8::Script> script = v8::Script::Compile(source);
+ CHECK_EQ(4, script->Run()->Int32Value());
+ }
+ isolate->Dispose();
}
}
-DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
- SerializeTwice) {
+UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
+ SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Deserialize();
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
- v8::Local<v8::Script> script = v8::Script::Compile(source);
- CHECK_EQ(4, script->Run()->Int32Value());
+ const char* c_source = "\"1234\".length";
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
+ v8::Local<v8::Script> script = v8::Script::Compile(source);
+ CHECK_EQ(4, script->Run()->Int32Value());
+ }
+ isolate->Dispose();
}
}
-TEST(PartialSerialization) {
+UNINITIALIZED_TEST(PartialSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- Isolate* isolate = CcTest::i_isolate();
- CcTest::i_isolate()->enable_serializer();
- v8::V8::Initialize();
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- Heap* heap = isolate->heap();
-
- v8::Persistent<v8::Context> env;
- {
- HandleScope scope(isolate);
- env.Reset(v8_isolate, v8::Context::New(v8_isolate));
- }
- DCHECK(!env.IsEmpty());
+ v8::Isolate::CreateParams params;
+ params.enable_serializer = true;
+ v8::Isolate* v8_isolate = v8::Isolate::New(params);
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ v8_isolate->Enter();
{
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
- }
- // Make sure all builtin scripts are cached.
- { HandleScope scope(isolate);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- isolate->bootstrapper()->NativesSourceLookup(i);
+ Heap* heap = isolate->heap();
+
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+ }
+ DCHECK(!env.IsEmpty());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ }
+ // Make sure all builtin scripts are cached.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ isolate->bootstrapper()->NativesSourceLookup(i);
+ }
+ }
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+ Object* raw_foo;
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
+ DCHECK(!foo.IsEmpty());
+ raw_foo = *(v8::Utils::OpenHandle(*foo));
}
- }
- heap->CollectAllGarbage(Heap::kNoGCFlags);
- heap->CollectAllGarbage(Heap::kNoGCFlags);
- Object* raw_foo;
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
- DCHECK(!foo.IsEmpty());
- raw_foo = *(v8::Utils::OpenHandle(*foo));
- }
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
+ env.Reset();
+
+ FileByteSink startup_sink(startup_name.start());
+ StartupSerializer startup_serializer(isolate, &startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_foo);
+ startup_serializer.SerializeWeakReferences();
+
+ partial_sink.WriteSpaceUsed(
+ p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
+
+ startup_sink.WriteSpaceUsed(
+ startup_serializer.CurrentAllocationAddress(NEW_SPACE),
+ startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
+ startup_serializer.CurrentAllocationAddress(CODE_SPACE),
+ startup_serializer.CurrentAllocationAddress(MAP_SPACE),
+ startup_serializer.CurrentAllocationAddress(CELL_SPACE),
+ startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
+ startup_serializer.CurrentAllocationAddress(LO_SPACE));
+ startup_name.Dispose();
}
- env.Reset();
-
- FileByteSink startup_sink(startup_name.start());
- StartupSerializer startup_serializer(isolate, &startup_sink);
- startup_serializer.SerializeStrongReferences();
-
- FileByteSink partial_sink(FLAG_testing_serialization_file);
- PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
- p_ser.Serialize(&raw_foo);
- startup_serializer.SerializeWeakReferences();
-
- partial_sink.WriteSpaceUsed(
- p_ser.CurrentAllocationAddress(NEW_SPACE),
- p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- p_ser.CurrentAllocationAddress(CODE_SPACE),
- p_ser.CurrentAllocationAddress(MAP_SPACE),
- p_ser.CurrentAllocationAddress(CELL_SPACE),
- p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
-
- startup_sink.WriteSpaceUsed(
- startup_serializer.CurrentAllocationAddress(NEW_SPACE),
- startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
- startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
- startup_serializer.CurrentAllocationAddress(CODE_SPACE),
- startup_serializer.CurrentAllocationAddress(MAP_SPACE),
- startup_serializer.CurrentAllocationAddress(CELL_SPACE),
- startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
- startup_name.Dispose();
+ v8_isolate->Exit();
+ v8_isolate->Dispose();
}
}
-DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- CHECK(InitializeFromFile(startup_name.start()));
+ v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ CHECK(v8_isolate);
startup_name.Dispose();
-
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
- Isolate* isolate = CcTest::i_isolate();
- Object* root;
{
- SnapshotByteSource source(snapshot, snapshot_size);
- Deserializer deserializer(&source);
- ReserveSpaceForSnapshot(&deserializer, file_name);
- deserializer.DeserializePartial(isolate, &root);
- CHECK(root->IsString());
- }
- HandleScope handle_scope(isolate);
- Handle<Object> root_handle(root, isolate);
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+ const char* file_name = FLAG_testing_serialization_file;
- Object* root2;
- {
- SnapshotByteSource source(snapshot, snapshot_size);
- Deserializer deserializer(&source);
- ReserveSpaceForSnapshot(&deserializer, file_name);
- deserializer.DeserializePartial(isolate, &root2);
- CHECK(root2->IsString());
- CHECK(*root_handle == root2);
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ Object* root;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ ReserveSpaceForSnapshot(&deserializer, file_name);
+ deserializer.DeserializePartial(isolate, &root);
+ CHECK(root->IsString());
+ }
+ HandleScope handle_scope(isolate);
+ Handle<Object> root_handle(root, isolate);
+
+
+ Object* root2;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ ReserveSpaceForSnapshot(&deserializer, file_name);
+ deserializer.DeserializePartial(isolate, &root2);
+ CHECK(root2->IsString());
+ CHECK(*root_handle == root2);
+ }
}
+ v8_isolate->Dispose();
}
}
-TEST(ContextSerialization) {
+UNINITIALIZED_TEST(ContextSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- Isolate* isolate = CcTest::i_isolate();
- CcTest::i_isolate()->enable_serializer();
- v8::V8::Initialize();
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Isolate::CreateParams params;
+ params.enable_serializer = true;
+ v8::Isolate* v8_isolate = v8::Isolate::New(params);
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
Heap* heap = isolate->heap();
-
- v8::Persistent<v8::Context> env;
{
- HandleScope scope(isolate);
- env.Reset(v8_isolate, v8::Context::New(v8_isolate));
- }
- DCHECK(!env.IsEmpty());
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
- }
- // Make sure all builtin scripts are cached.
- { HandleScope scope(isolate);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- isolate->bootstrapper()->NativesSourceLookup(i);
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
}
- }
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of env.
- heap->CollectAllGarbage(Heap::kNoGCFlags);
+ DCHECK(!env.IsEmpty());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ }
+ // Make sure all builtin scripts are cached.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ isolate->bootstrapper()->NativesSourceLookup(i);
+ }
+ }
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of env.
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
- }
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
- i::Object* raw_context = *v8::Utils::OpenPersistent(env);
-
- env.Reset();
-
- FileByteSink startup_sink(startup_name.start());
- StartupSerializer startup_serializer(isolate, &startup_sink);
- startup_serializer.SerializeStrongReferences();
-
- FileByteSink partial_sink(FLAG_testing_serialization_file);
- PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
- p_ser.Serialize(&raw_context);
- startup_serializer.SerializeWeakReferences();
-
- partial_sink.WriteSpaceUsed(
- p_ser.CurrentAllocationAddress(NEW_SPACE),
- p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- p_ser.CurrentAllocationAddress(CODE_SPACE),
- p_ser.CurrentAllocationAddress(MAP_SPACE),
- p_ser.CurrentAllocationAddress(CELL_SPACE),
- p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
-
- startup_sink.WriteSpaceUsed(
- startup_serializer.CurrentAllocationAddress(NEW_SPACE),
- startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
- startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
- startup_serializer.CurrentAllocationAddress(CODE_SPACE),
- startup_serializer.CurrentAllocationAddress(MAP_SPACE),
- startup_serializer.CurrentAllocationAddress(CELL_SPACE),
- startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
- startup_name.Dispose();
+ i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+
+ env.Reset();
+
+ FileByteSink startup_sink(startup_name.start());
+ StartupSerializer startup_serializer(isolate, &startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_context);
+ startup_serializer.SerializeWeakReferences();
+
+ partial_sink.WriteSpaceUsed(
+ p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
+
+ startup_sink.WriteSpaceUsed(
+ startup_serializer.CurrentAllocationAddress(NEW_SPACE),
+ startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
+ startup_serializer.CurrentAllocationAddress(CODE_SPACE),
+ startup_serializer.CurrentAllocationAddress(MAP_SPACE),
+ startup_serializer.CurrentAllocationAddress(CELL_SPACE),
+ startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
+ startup_serializer.CurrentAllocationAddress(LO_SPACE));
+ startup_name.Dispose();
+ }
+ v8_isolate->Dispose();
}
}
-DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
+UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- CHECK(InitializeFromFile(startup_name.start()));
+ v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ CHECK(v8_isolate);
startup_name.Dispose();
-
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
- Isolate* isolate = CcTest::i_isolate();
- Object* root;
{
- SnapshotByteSource source(snapshot, snapshot_size);
- Deserializer deserializer(&source);
- ReserveSpaceForSnapshot(&deserializer, file_name);
- deserializer.DeserializePartial(isolate, &root);
- CHECK(root->IsContext());
- }
- HandleScope handle_scope(isolate);
- Handle<Object> root_handle(root, isolate);
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+ const char* file_name = FLAG_testing_serialization_file;
- Object* root2;
- {
- SnapshotByteSource source(snapshot, snapshot_size);
- Deserializer deserializer(&source);
- ReserveSpaceForSnapshot(&deserializer, file_name);
- deserializer.DeserializePartial(isolate, &root2);
- CHECK(root2->IsContext());
- CHECK(*root_handle != root2);
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ Object* root;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ ReserveSpaceForSnapshot(&deserializer, file_name);
+ deserializer.DeserializePartial(isolate, &root);
+ CHECK(root->IsContext());
+ }
+ HandleScope handle_scope(isolate);
+ Handle<Object> root_handle(root, isolate);
+
+
+ Object* root2;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ ReserveSpaceForSnapshot(&deserializer, file_name);
+ deserializer.DeserializePartial(isolate, &root2);
+ CHECK(root2->IsContext());
+ CHECK(*root_handle != root2);
+ }
}
+ v8_isolate->Dispose();
}
}
@@ -793,6 +786,121 @@ TEST(SerializeToplevelInternalizedString) {
}
+Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
+ Vector<const uint8_t> body,
+ Vector<const uint8_t> tail, int repeats) {
+ int source_length = head.length() + body.length() * repeats + tail.length();
+ uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
+ CopyChars(source, head.start(), head.length());
+ for (int i = 0; i < repeats; i++) {
+ CopyChars(source + head.length() + i * body.length(), body.start(),
+ body.length());
+ }
+ CopyChars(source + head.length() + repeats * body.length(), tail.start(),
+ tail.length());
+ return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
+ source_length);
+}
+
+
+TEST(SerializeToplevelLargeCodeObject) {
+ FLAG_serialize_toplevel = true;
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ Vector<const uint8_t> source =
+ ConstructSource(STATIC_CHAR_VECTOR("var j=1; try { if (j) throw 1;"),
+ STATIC_CHAR_VECTOR("for(var i=0;i<1;i++)j++;"),
+ STATIC_CHAR_VECTOR("} catch (e) { j=7; } j"), 10000);
+ Handle<String> source_str =
+ isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
+
+ Handle<JSObject> global(isolate->context()->global_object());
+ ScriptData* cache = NULL;
+
+ Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+ source_str, Handle<String>(), 0, 0, false,
+ Handle<Context>(isolate->native_context()), NULL, &cache,
+ v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+ CHECK(isolate->heap()->InSpace(orig->code(), LO_SPACE));
+
+ Handle<SharedFunctionInfo> copy;
+ {
+ DisallowCompilation no_compile_expected(isolate);
+ copy = Compiler::CompileScript(
+ source_str, Handle<String>(), 0, 0, false,
+ Handle<Context>(isolate->native_context()), NULL, &cache,
+ v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ }
+ CHECK_NE(*orig, *copy);
+
+ Handle<JSFunction> copy_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ copy, isolate->native_context());
+
+ Handle<Object> copy_result =
+ Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+
+ int result_int;
+ CHECK(copy_result->ToInt32(&result_int));
+ CHECK_EQ(7, result_int);
+
+ delete cache;
+ source.Dispose();
+}
+
+
+TEST(SerializeToplevelLargeString) {
+ FLAG_serialize_toplevel = true;
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ Vector<const uint8_t> source = ConstructSource(
+ STATIC_CHAR_VECTOR("var s = \""), STATIC_CHAR_VECTOR("abcdef"),
+ STATIC_CHAR_VECTOR("\"; s"), 1000000);
+ Handle<String> source_str =
+ isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
+
+ Handle<JSObject> global(isolate->context()->global_object());
+ ScriptData* cache = NULL;
+
+ Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+ source_str, Handle<String>(), 0, 0, false,
+ Handle<Context>(isolate->native_context()), NULL, &cache,
+ v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+ Handle<SharedFunctionInfo> copy;
+ {
+ DisallowCompilation no_compile_expected(isolate);
+ copy = Compiler::CompileScript(
+ source_str, Handle<String>(), 0, 0, false,
+ Handle<Context>(isolate->native_context()), NULL, &cache,
+ v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ }
+ CHECK_NE(*orig, *copy);
+
+ Handle<JSFunction> copy_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ copy, isolate->native_context());
+
+ Handle<Object> copy_result =
+ Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+
+ CHECK_EQ(6 * 1000000, Handle<String>::cast(copy_result)->length());
+ CHECK(isolate->heap()->InSpace(HeapObject::cast(*copy_result), LO_SPACE));
+
+ delete cache;
+ source.Dispose();
+}
+
+
TEST(SerializeToplevelIsolates) {
FLAG_serialize_toplevel = true;
@@ -800,7 +908,6 @@ TEST(SerializeToplevelIsolates) {
v8::ScriptCompiler::CachedData* cache;
v8::Isolate* isolate1 = v8::Isolate::New();
- v8::Isolate* isolate2 = v8::Isolate::New();
{
v8::Isolate::Scope iscope(isolate1);
v8::HandleScope scope(isolate1);
@@ -824,6 +931,7 @@ TEST(SerializeToplevelIsolates) {
}
isolate1->Dispose();
+ v8::Isolate* isolate2 = v8::Isolate::New();
{
v8::Isolate::Scope iscope(isolate2);
v8::HandleScope scope(isolate2);
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index 0062094400..d09c128d17 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -203,6 +203,33 @@ static void VerifyMemoryChunk(Isolate* isolate,
}
+TEST(Regress3540) {
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+ CHECK(
+ memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+ TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+ CodeRange* code_range = new CodeRange(isolate);
+ const size_t code_range_size = 4 * MB;
+ if (!code_range->SetUp(code_range_size)) return;
+ Address address;
+ size_t size;
+ address = code_range->AllocateRawMemory(code_range_size - MB,
+ code_range_size - MB, &size);
+ CHECK(address != NULL);
+ Address null_address;
+ size_t null_size;
+ null_address = code_range->AllocateRawMemory(
+ code_range_size - MB, code_range_size - MB, &null_size);
+ CHECK(null_address == NULL);
+ code_range->FreeRawMemory(address, size);
+ delete code_range;
+ memory_allocator->TearDown();
+ delete memory_allocator;
+}
+
+
static unsigned int Pseudorandom() {
static uint32_t lo = 2345;
lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
@@ -212,9 +239,7 @@ static unsigned int Pseudorandom() {
TEST(MemoryChunk) {
Isolate* isolate = CcTest::i_isolate();
- isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
- CHECK(heap->ConfigureHeapDefault());
size_t reserve_area_size = 1 * MB;
size_t initial_commit_area_size, second_commit_area_size;
@@ -268,9 +293,7 @@ TEST(MemoryChunk) {
TEST(MemoryAllocator) {
Isolate* isolate = CcTest::i_isolate();
- isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
- CHECK(isolate->heap()->ConfigureHeapDefault());
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
@@ -317,9 +340,7 @@ TEST(MemoryAllocator) {
TEST(NewSpace) {
Isolate* isolate = CcTest::i_isolate();
- isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
- CHECK(heap->ConfigureHeapDefault());
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
heap->MaxExecutableSize()));
@@ -345,9 +366,7 @@ TEST(NewSpace) {
TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
- isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
- CHECK(heap->ConfigureHeapDefault());
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
heap->MaxExecutableSize()));
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index b55780182b..ef13c4dadf 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -112,11 +112,11 @@ class Resource: public v8::String::ExternalStringResource {
};
-class AsciiResource: public v8::String::ExternalAsciiStringResource {
+class OneByteResource : public v8::String::ExternalOneByteStringResource {
public:
- AsciiResource(const char* data, size_t length)
+ OneByteResource(const char* data, size_t length)
: data_(data), length_(length) {}
- ~AsciiResource() { i::DeleteArray(data_); }
+ ~OneByteResource() { i::DeleteArray(data_); }
virtual const char* data() const { return data_; }
virtual size_t length() const { return length_; }
@@ -202,7 +202,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
for (int j = 0; j < len; j++) {
buf[j] = rng->next(0x80);
}
- AsciiResource* resource = new AsciiResource(buf, len);
+ OneByteResource* resource = new OneByteResource(buf, len);
building_blocks[i] =
v8::Utils::OpenHandle(
*v8::String::NewExternal(CcTest::isolate(), resource));
@@ -454,7 +454,7 @@ static Handle<String> ConstructLeft(
ConsStringGenerationData* data,
int depth) {
Factory* factory = CcTest::i_isolate()->factory();
- Handle<String> answer = factory->NewStringFromStaticAscii("");
+ Handle<String> answer = factory->NewStringFromStaticChars("");
data->stats_.leaves_++;
for (int i = 0; i < depth; i++) {
Handle<String> block = data->block(i);
@@ -473,7 +473,7 @@ static Handle<String> ConstructRight(
ConsStringGenerationData* data,
int depth) {
Factory* factory = CcTest::i_isolate()->factory();
- Handle<String> answer = factory->NewStringFromStaticAscii("");
+ Handle<String> answer = factory->NewStringFromStaticChars("");
data->stats_.leaves_++;
for (int i = depth - 1; i >= 0; i--) {
Handle<String> block = data->block(i);
@@ -848,23 +848,23 @@ TEST(StringCharacterStreamRandom) {
}
-static const int DEEP_ASCII_DEPTH = 100000;
+static const int kDeepOneByteDepth = 100000;
-TEST(DeepAscii) {
- printf("TestDeepAscii\n");
+TEST(DeepOneByte) {
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
- char* foo = NewArray<char>(DEEP_ASCII_DEPTH);
- for (int i = 0; i < DEEP_ASCII_DEPTH; i++) {
+ char* foo = NewArray<char>(kDeepOneByteDepth);
+ for (int i = 0; i < kDeepOneByteDepth; i++) {
foo[i] = "foo "[i % 4];
}
- Handle<String> string = factory->NewStringFromOneByte(
- OneByteVector(foo, DEEP_ASCII_DEPTH)).ToHandleChecked();
- Handle<String> foo_string = factory->NewStringFromStaticAscii("foo");
- for (int i = 0; i < DEEP_ASCII_DEPTH; i += 10) {
+ Handle<String> string =
+ factory->NewStringFromOneByte(OneByteVector(foo, kDeepOneByteDepth))
+ .ToHandleChecked();
+ Handle<String> foo_string = factory->NewStringFromStaticChars("foo");
+ for (int i = 0; i < kDeepOneByteDepth; i += 10) {
string = factory->NewConsString(string, foo_string).ToHandleChecked();
}
Handle<String> flat_string =
@@ -872,7 +872,7 @@ TEST(DeepAscii) {
String::Flatten(flat_string);
for (int i = 0; i < 500; i++) {
- TraverseFirst(flat_string, string, DEEP_ASCII_DEPTH);
+ TraverseFirst(flat_string, string, kDeepOneByteDepth);
}
DeleteArray<char>(foo);
}
@@ -882,13 +882,13 @@ TEST(Utf8Conversion) {
// Smoke test for converting strings to utf-8.
CcTest::InitializeVM();
v8::HandleScope handle_scope(CcTest::isolate());
- // A simple ascii string
- const char* ascii_string = "abcdef12345";
- int len = v8::String::NewFromUtf8(CcTest::isolate(), ascii_string,
+ // A simple one-byte string
+ const char* one_byte_string = "abcdef12345";
+ int len = v8::String::NewFromUtf8(CcTest::isolate(), one_byte_string,
v8::String::kNormalString,
- StrLength(ascii_string))->Utf8Length();
- CHECK_EQ(StrLength(ascii_string), len);
- // A mixed ascii and non-ascii string
+ StrLength(one_byte_string))->Utf8Length();
+ CHECK_EQ(StrLength(one_byte_string), len);
+ // A mixed one-byte and two-byte string
// U+02E4 -> CB A4
// U+0064 -> 64
// U+12E4 -> E1 8B A4
@@ -934,79 +934,89 @@ TEST(ExternalShortStringAdd) {
CHECK_GT(kMaxLength, i::ConsString::kMinLength);
// Allocate two JavaScript arrays for holding short strings.
- v8::Handle<v8::Array> ascii_external_strings =
+ v8::Handle<v8::Array> one_byte_external_strings =
v8::Array::New(CcTest::isolate(), kMaxLength + 1);
- v8::Handle<v8::Array> non_ascii_external_strings =
+ v8::Handle<v8::Array> non_one_byte_external_strings =
v8::Array::New(CcTest::isolate(), kMaxLength + 1);
- // Generate short ascii and non-ascii external strings.
+ // Generate short one-byte and two-byte external strings.
for (int i = 0; i <= kMaxLength; i++) {
- char* ascii = NewArray<char>(i + 1);
+ char* one_byte = NewArray<char>(i + 1);
for (int j = 0; j < i; j++) {
- ascii[j] = 'a';
+ one_byte[j] = 'a';
}
// Terminating '\0' is left out on purpose. It is not required for external
// string data.
- AsciiResource* ascii_resource = new AsciiResource(ascii, i);
- v8::Local<v8::String> ascii_external_string =
- v8::String::NewExternal(CcTest::isolate(), ascii_resource);
+ OneByteResource* one_byte_resource = new OneByteResource(one_byte, i);
+ v8::Local<v8::String> one_byte_external_string =
+ v8::String::NewExternal(CcTest::isolate(), one_byte_resource);
- ascii_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
- ascii_external_string);
- uc16* non_ascii = NewArray<uc16>(i + 1);
+ one_byte_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
+ one_byte_external_string);
+ uc16* non_one_byte = NewArray<uc16>(i + 1);
for (int j = 0; j < i; j++) {
- non_ascii[j] = 0x1234;
+ non_one_byte[j] = 0x1234;
}
// Terminating '\0' is left out on purpose. It is not required for external
// string data.
- Resource* resource = new Resource(non_ascii, i);
- v8::Local<v8::String> non_ascii_external_string =
- v8::String::NewExternal(CcTest::isolate(), resource);
- non_ascii_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
- non_ascii_external_string);
+ Resource* resource = new Resource(non_one_byte, i);
+ v8::Local<v8::String> non_one_byte_external_string =
+ v8::String::NewExternal(CcTest::isolate(), resource);
+ non_one_byte_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
+ non_one_byte_external_string);
}
// Add the arrays with the short external strings in the global object.
v8::Handle<v8::Object> global = context->Global();
- global->Set(v8_str("external_ascii"), ascii_external_strings);
- global->Set(v8_str("external_non_ascii"), non_ascii_external_strings);
+ global->Set(v8_str("external_one_byte"), one_byte_external_strings);
+ global->Set(v8_str("external_non_one_byte"), non_one_byte_external_strings);
global->Set(v8_str("max_length"),
v8::Integer::New(CcTest::isolate(), kMaxLength));
- // Add short external ascii and non-ascii strings checking the result.
+ // Add short external one-byte and two-byte strings checking the result.
static const char* source =
- "function test() {"
- " var ascii_chars = 'aaaaaaaaaaaaaaaaaaaa';"
- " var non_ascii_chars = '\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234';" //NOLINT
- " if (ascii_chars.length != max_length) return 1;"
- " if (non_ascii_chars.length != max_length) return 2;"
- " var ascii = Array(max_length + 1);"
- " var non_ascii = Array(max_length + 1);"
- " for (var i = 0; i <= max_length; i++) {"
- " ascii[i] = ascii_chars.substring(0, i);"
- " non_ascii[i] = non_ascii_chars.substring(0, i);"
- " };"
- " for (var i = 0; i <= max_length; i++) {"
- " if (ascii[i] != external_ascii[i]) return 3;"
- " if (non_ascii[i] != external_non_ascii[i]) return 4;"
- " for (var j = 0; j < i; j++) {"
- " if (external_ascii[i] !="
- " (external_ascii[j] + external_ascii[i - j])) return 5;"
- " if (external_non_ascii[i] !="
- " (external_non_ascii[j] + external_non_ascii[i - j])) return 6;"
- " if (non_ascii[i] != (non_ascii[j] + non_ascii[i - j])) return 7;"
- " if (ascii[i] != (ascii[j] + ascii[i - j])) return 8;"
- " if (ascii[i] != (external_ascii[j] + ascii[i - j])) return 9;"
- " if (ascii[i] != (ascii[j] + external_ascii[i - j])) return 10;"
- " if (non_ascii[i] !="
- " (external_non_ascii[j] + non_ascii[i - j])) return 11;"
- " if (non_ascii[i] !="
- " (non_ascii[j] + external_non_ascii[i - j])) return 12;"
- " }"
- " }"
- " return 0;"
- "};"
- "test()";
+ "function test() {"
+ " var one_byte_chars = 'aaaaaaaaaaaaaaaaaaaa';"
+ " var non_one_byte_chars = "
+ "'\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1"
+ "234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\"
+ "u1234';" // NOLINT
+ " if (one_byte_chars.length != max_length) return 1;"
+ " if (non_one_byte_chars.length != max_length) return 2;"
+ " var one_byte = Array(max_length + 1);"
+ " var non_one_byte = Array(max_length + 1);"
+ " for (var i = 0; i <= max_length; i++) {"
+ " one_byte[i] = one_byte_chars.substring(0, i);"
+ " non_one_byte[i] = non_one_byte_chars.substring(0, i);"
+ " };"
+ " for (var i = 0; i <= max_length; i++) {"
+ " if (one_byte[i] != external_one_byte[i]) return 3;"
+ " if (non_one_byte[i] != external_non_one_byte[i]) return 4;"
+ " for (var j = 0; j < i; j++) {"
+ " if (external_one_byte[i] !="
+ " (external_one_byte[j] + external_one_byte[i - j])) return "
+ "5;"
+ " if (external_non_one_byte[i] !="
+ " (external_non_one_byte[j] + external_non_one_byte[i - "
+ "j])) return 6;"
+ " if (non_one_byte[i] != (non_one_byte[j] + non_one_byte[i - "
+ "j])) return 7;"
+ " if (one_byte[i] != (one_byte[j] + one_byte[i - j])) return 8;"
+ " if (one_byte[i] != (external_one_byte[j] + one_byte[i - j])) "
+ "return 9;"
+ " if (one_byte[i] != (one_byte[j] + external_one_byte[i - j])) "
+ "return 10;"
+ " if (non_one_byte[i] !="
+ " (external_non_one_byte[j] + non_one_byte[i - j])) return "
+ "11;"
+ " if (non_one_byte[i] !="
+ " (non_one_byte[j] + external_non_one_byte[i - j])) return "
+ "12;"
+ " }"
+ " }"
+ " return 0;"
+ "};"
+ "test()";
CHECK_EQ(0, CompileRun(source)->Int32Value());
}
@@ -1091,7 +1101,7 @@ TEST(SliceFromCons) {
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<String> string =
- factory->NewStringFromStaticAscii("parentparentparent");
+ factory->NewStringFromStaticChars("parentparentparent");
Handle<String> parent =
factory->NewConsString(string, string).ToHandleChecked();
CHECK(parent->IsConsString());
@@ -1109,11 +1119,11 @@ TEST(SliceFromCons) {
}
-class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
public:
- explicit AsciiVectorResource(i::Vector<const char> vector)
+ explicit OneByteVectorResource(i::Vector<const char> vector)
: data_(vector) {}
- virtual ~AsciiVectorResource() {}
+ virtual ~OneByteVectorResource() {}
virtual size_t length() const { return data_.length(); }
virtual const char* data() const { return data_.start(); }
private:
@@ -1126,10 +1136,10 @@ TEST(SliceFromExternal) {
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
- AsciiVectorResource resource(
+ OneByteVectorResource resource(
i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
Handle<String> string =
- factory->NewExternalStringFromAscii(&resource).ToHandleChecked();
+ factory->NewExternalStringFromOneByte(&resource).ToHandleChecked();
CHECK(string->IsExternalString());
Handle<String> slice = factory->NewSubString(string, 1, 25);
CHECK(slice->IsSlicedString());
@@ -1199,28 +1209,34 @@ TEST(SliceFromSlice) {
}
-TEST(AsciiArrayJoin) {
+UNINITIALIZED_TEST(OneByteArrayJoin) {
+ v8::Isolate::CreateParams create_params;
// Set heap limits.
- v8::ResourceConstraints constraints;
- constraints.set_max_semi_space_size(1);
- constraints.set_max_old_space_size(4);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
-
- // String s is made of 2^17 = 131072 'c' characters and a is an array
- // starting with 'bad', followed by 2^14 times the string s. That means the
- // total length of the concatenated strings is 2^31 + 3. So on 32bit systems
- // summing the lengths of the strings (as Smis) overflows and wraps.
- LocalContext context;
- v8::HandleScope scope(CcTest::isolate());
- v8::TryCatch try_catch;
- CHECK(CompileRun(
- "var two_14 = Math.pow(2, 14);"
- "var two_17 = Math.pow(2, 17);"
- "var s = Array(two_17 + 1).join('c');"
- "var a = ['bad'];"
- "for (var i = 1; i <= two_14; i++) a.push(s);"
- "a.join("");").IsEmpty());
- CHECK(try_catch.HasCaught());
+ create_params.constraints.set_max_semi_space_size(1);
+ create_params.constraints.set_max_old_space_size(4);
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ isolate->Enter();
+
+ {
+ // String s is made of 2^17 = 131072 'c' characters and a is an array
+ // starting with 'bad', followed by 2^14 times the string s. That means the
+ // total length of the concatenated strings is 2^31 + 3. So on 32bit systems
+ // summing the lengths of the strings (as Smis) overflows and wraps.
+ LocalContext context(isolate);
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch;
+ CHECK(CompileRun(
+ "var two_14 = Math.pow(2, 14);"
+ "var two_17 = Math.pow(2, 17);"
+ "var s = Array(two_17 + 1).join('c');"
+ "var a = ['bad'];"
+ "for (var i = 1; i <= two_14; i++) a.push(s);"
+ "a.join("
+ ");").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ isolate->Exit();
+ isolate->Dispose();
}
@@ -1281,14 +1297,14 @@ TEST(StringReplaceAtomTwoByteResult) {
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8::Local<v8::Value> result = CompileRun(
- "var subject = 'ascii~only~string~'; "
+ "var subject = 'one_byte~only~string~'; "
"var replace = '\x80'; "
"subject.replace(/~/g, replace); ");
CHECK(result->IsString());
Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSeqTwoByteString());
- v8::Local<v8::String> expected = v8_str("ascii\x80only\x80string\x80");
+ v8::Local<v8::String> expected = v8_str("one_byte\x80only\x80string\x80");
CHECK(expected->Equals(result));
}
@@ -1375,7 +1391,7 @@ TEST(InvalidExternalString) {
Isolate* isolate = CcTest::i_isolate();
{ HandleScope scope(isolate);
DummyOneByteResource r;
- CHECK(isolate->factory()->NewExternalStringFromAscii(&r).is_null());
+ CHECK(isolate->factory()->NewExternalStringFromOneByte(&r).is_null());
CHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index a5ed7ab9bf..21d3b95f10 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -459,3 +459,15 @@ TEST(PostponeTerminateException) {
CHECK(try_catch.HasTerminated());
CHECK_EQ(2, callback_counter);
}
+
+
+TEST(ErrorObjectAfterTermination) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+ v8::V8::TerminateExecution(isolate);
+ v8::Local<v8::Value> error = v8::Exception::Error(v8_str("error"));
+ // TODO(yangguo): crbug/403509. Check for empty handle instead.
+ CHECK(error->IsUndefined());
+}
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 8c5e41ca10..e9122b1c65 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -11,21 +11,37 @@
using namespace v8::internal;
+
// Testing auxiliaries (breaking the Type abstraction).
+
+
+static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+}
+
+
+static bool IsInteger(i::Object* x) {
+ return x->IsNumber() && IsInteger(x->Number());
+}
+
+
+typedef uint32_t bitset;
+
+
struct ZoneRep {
typedef void* Struct;
static bool IsStruct(Type* t, int tag) {
return !IsBitset(t) && reinterpret_cast<intptr_t>(AsStruct(t)[0]) == tag;
}
- static bool IsBitset(Type* t) { return reinterpret_cast<intptr_t>(t) & 1; }
+ static bool IsBitset(Type* t) { return reinterpret_cast<uintptr_t>(t) & 1; }
static bool IsUnion(Type* t) { return IsStruct(t, 6); }
static Struct* AsStruct(Type* t) {
return reinterpret_cast<Struct*>(t);
}
- static int AsBitset(Type* t) {
- return static_cast<int>(reinterpret_cast<intptr_t>(t) >> 1);
+ static bitset AsBitset(Type* t) {
+ return static_cast<bitset>(reinterpret_cast<uintptr_t>(t) ^ 1u);
}
static Struct* AsUnion(Type* t) {
return AsStruct(t);
@@ -40,7 +56,7 @@ struct ZoneRep {
using Type::BitsetType::New;
using Type::BitsetType::Glb;
using Type::BitsetType::Lub;
- using Type::BitsetType::InherentLub;
+ using Type::BitsetType::IsInhabited;
};
};
@@ -55,7 +71,9 @@ struct HeapRep {
static bool IsUnion(Handle<HeapType> t) { return IsStruct(t, 6); }
static Struct* AsStruct(Handle<HeapType> t) { return FixedArray::cast(*t); }
- static int AsBitset(Handle<HeapType> t) { return Smi::cast(*t)->value(); }
+ static bitset AsBitset(Handle<HeapType> t) {
+ return static_cast<bitset>(reinterpret_cast<uintptr_t>(*t));
+ }
static Struct* AsUnion(Handle<HeapType> t) { return AsStruct(t); }
static int Length(Struct* structured) { return structured->length() - 1; }
@@ -65,10 +83,9 @@ struct HeapRep {
using HeapType::BitsetType::New;
using HeapType::BitsetType::Glb;
using HeapType::BitsetType::Lub;
- using HeapType::BitsetType::InherentLub;
- static int Glb(Handle<HeapType> type) { return Glb(*type); }
- static int Lub(Handle<HeapType> type) { return Lub(*type); }
- static int InherentLub(Handle<HeapType> type) { return InherentLub(*type); }
+ using HeapType::BitsetType::IsInhabited;
+ static bitset Glb(Handle<HeapType> type) { return Glb(*type); }
+ static bitset Lub(Handle<HeapType> type) { return Lub(*type); }
};
};
@@ -81,14 +98,19 @@ class Types {
#define DECLARE_TYPE(name, value) \
name = Type::name(region); \
types.push_back(name);
- BITSET_TYPE_LIST(DECLARE_TYPE)
+ PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
- object_map = isolate->factory()->NewMap(JS_OBJECT_TYPE, 3 * kPointerSize);
- array_map = isolate->factory()->NewMap(JS_ARRAY_TYPE, 4 * kPointerSize);
+ object_map = isolate->factory()->NewMap(
+ JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ array_map = isolate->factory()->NewMap(
+ JS_ARRAY_TYPE, JSArray::kSize);
+ number_map = isolate->factory()->NewMap(
+ HEAP_NUMBER_TYPE, HeapNumber::kSize);
uninitialized_map = isolate->factory()->uninitialized_map();
ObjectClass = Type::Class(object_map, region);
ArrayClass = Type::Class(array_map, region);
+ NumberClass = Type::Class(number_map, region);
UninitializedClass = Type::Class(uninitialized_map, region);
maps.push_back(object_map);
@@ -121,13 +143,15 @@ class Types {
types.push_back(Type::Constant(*it, region));
}
- doubles.push_back(-0.0);
- doubles.push_back(+0.0);
- doubles.push_back(-std::numeric_limits<double>::infinity());
- doubles.push_back(+std::numeric_limits<double>::infinity());
+ integers.push_back(isolate->factory()->NewNumber(-V8_INFINITY));
+ integers.push_back(isolate->factory()->NewNumber(+V8_INFINITY));
+ integers.push_back(isolate->factory()->NewNumber(-rng_->NextInt(10)));
+ integers.push_back(isolate->factory()->NewNumber(+rng_->NextInt(10)));
for (int i = 0; i < 10; ++i) {
- doubles.push_back(rng_->NextInt());
- doubles.push_back(rng_->NextDouble() * rng_->NextInt());
+ double x = rng_->NextInt();
+ integers.push_back(isolate->factory()->NewNumber(x));
+ x *= rng_->NextInt();
+ if (!IsMinusZero(x)) integers.push_back(isolate->factory()->NewNumber(x));
}
NumberArray = Type::Array(Number, region);
@@ -146,6 +170,7 @@ class Types {
Handle<i::Map> object_map;
Handle<i::Map> array_map;
+ Handle<i::Map> number_map;
Handle<i::Map> uninitialized_map;
Handle<i::Smi> smi;
@@ -161,6 +186,7 @@ class Types {
TypeHandle ObjectClass;
TypeHandle ArrayClass;
+ TypeHandle NumberClass;
TypeHandle UninitializedClass;
TypeHandle SmiConstant;
@@ -182,27 +208,11 @@ class Types {
typedef std::vector<TypeHandle> TypeVector;
typedef std::vector<Handle<i::Map> > MapVector;
typedef std::vector<Handle<i::Object> > ValueVector;
- typedef std::vector<double> DoubleVector;
TypeVector types;
MapVector maps;
ValueVector values;
- DoubleVector doubles; // Some floating-point values, excluding NaN.
-
- // Range type helper functions, partially copied from types.cc.
- // Note: dle(dmin(x,y), dmax(x,y)) holds iff neither x nor y is NaN.
- bool dle(double x, double y) {
- return x <= y && (x != 0 || IsMinusZero(x) || !IsMinusZero(y));
- }
- bool deq(double x, double y) {
- return dle(x, y) && dle(y, x);
- }
- double dmin(double x, double y) {
- return dle(x, y) ? x : y;
- }
- double dmax(double x, double y) {
- return dle(x, y) ? y : x;
- }
+ ValueVector integers; // "Integer" values used for range limits.
TypeHandle Of(Handle<i::Object> value) {
return Type::Of(value, region_);
@@ -212,16 +222,20 @@ class Types {
return Type::NowOf(value, region_);
}
+ TypeHandle Class(Handle<i::Map> map) {
+ return Type::Class(map, region_);
+ }
+
TypeHandle Constant(Handle<i::Object> value) {
return Type::Constant(value, region_);
}
- TypeHandle Range(double min, double max) {
+ TypeHandle Range(Handle<i::Object> min, Handle<i::Object> max) {
return Type::Range(min, max, region_);
}
- TypeHandle Class(Handle<i::Map> map) {
- return Type::Class(map, region_);
+ TypeHandle Context(TypeHandle outer) {
+ return Type::Context(outer, region_);
}
TypeHandle Array1(TypeHandle element) {
@@ -258,20 +272,31 @@ class Types {
return types[rng_->NextInt(static_cast<int>(types.size()))];
}
- TypeHandle Fuzz(int depth = 5) {
+ TypeHandle Fuzz(int depth = 4) {
switch (rng_->NextInt(depth == 0 ? 3 : 20)) {
case 0: { // bitset
- int n = 0
#define COUNT_BITSET_TYPES(type, value) + 1
- BITSET_TYPE_LIST(COUNT_BITSET_TYPES)
+ int n = 0 PROPER_BITSET_TYPE_LIST(COUNT_BITSET_TYPES);
#undef COUNT_BITSET_TYPES
- ;
- int i = rng_->NextInt(n);
- #define PICK_BITSET_TYPE(type, value) \
- if (i-- == 0) return Type::type(region_);
- BITSET_TYPE_LIST(PICK_BITSET_TYPE)
- #undef PICK_BITSET_TYPE
- UNREACHABLE();
+ // Pick a bunch of named bitsets and return their intersection.
+ TypeHandle result = Type::Any(region_);
+ for (int i = 0, m = 1 + rng_->NextInt(3); i < m; ++i) {
+ int j = rng_->NextInt(n);
+ #define PICK_BITSET_TYPE(type, value) \
+ if (j-- == 0) { \
+ TypeHandle tmp = Type::Intersect( \
+ result, Type::type(region_), region_); \
+ if (tmp->Is(Type::None()) && i != 0) { \
+ break; \
+ } { \
+ result = tmp; \
+ continue; \
+ } \
+ }
+ PROPER_BITSET_TYPE_LIST(PICK_BITSET_TYPE)
+ #undef PICK_BITSET_TYPE
+ }
+ return result;
}
case 1: { // class
int i = rng_->NextInt(static_cast<int>(maps.size()));
@@ -281,18 +306,26 @@ class Types {
int i = rng_->NextInt(static_cast<int>(values.size()));
return Type::Constant(values[i], region_);
}
- case 3: { // context
+ case 3: { // range
+ int i = rng_->NextInt(static_cast<int>(integers.size()));
+ int j = rng_->NextInt(static_cast<int>(integers.size()));
+ i::Handle<i::Object> min = integers[i];
+ i::Handle<i::Object> max = integers[j];
+ if (min->Number() > max->Number()) std::swap(min, max);
+ return Type::Range(min, max, region_);
+ }
+ case 4: { // context
int depth = rng_->NextInt(3);
TypeHandle type = Type::Internal(region_);
for (int i = 0; i < depth; ++i) type = Type::Context(type, region_);
return type;
}
- case 4: { // array
+ case 5: { // array
TypeHandle element = Fuzz(depth / 2);
return Type::Array(element, region_);
}
- case 5:
- case 6: { // function
+ case 6:
+ case 7: { // function
TypeHandle result = Fuzz(depth / 2);
TypeHandle receiver = Fuzz(depth / 2);
int arity = rng_->NextInt(3);
@@ -330,7 +363,6 @@ struct Tests : Rep {
typedef typename TypesInstance::TypeVector::iterator TypeIterator;
typedef typename TypesInstance::MapVector::iterator MapIterator;
typedef typename TypesInstance::ValueVector::iterator ValueIterator;
- typedef typename TypesInstance::DoubleVector::iterator DoubleIterator;
Isolate* isolate;
HandleScope scope;
@@ -347,14 +379,15 @@ struct Tests : Rep {
bool Equal(TypeHandle type1, TypeHandle type2) {
return
type1->Equals(type2) &&
- Rep::IsBitset(type1) == Rep::IsBitset(type2) &&
- Rep::IsUnion(type1) == Rep::IsUnion(type2) &&
+ this->IsBitset(type1) == this->IsBitset(type2) &&
+ this->IsUnion(type1) == this->IsUnion(type2) &&
type1->NumClasses() == type2->NumClasses() &&
type1->NumConstants() == type2->NumConstants() &&
- (!Rep::IsBitset(type1) ||
- Rep::AsBitset(type1) == Rep::AsBitset(type2)) &&
- (!Rep::IsUnion(type1) ||
- Rep::Length(Rep::AsUnion(type1)) == Rep::Length(Rep::AsUnion(type2)));
+ (!this->IsBitset(type1) ||
+ this->AsBitset(type1) == this->AsBitset(type2)) &&
+ (!this->IsUnion(type1) ||
+ this->Length(this->AsUnion(type1)) ==
+ this->Length(this->AsUnion(type2)));
}
void CheckEqual(TypeHandle type1, TypeHandle type2) {
@@ -364,36 +397,37 @@ struct Tests : Rep {
void CheckSub(TypeHandle type1, TypeHandle type2) {
CHECK(type1->Is(type2));
CHECK(!type2->Is(type1));
- if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_NE(Rep::AsBitset(type1), Rep::AsBitset(type2));
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK(this->AsBitset(type1) != this->AsBitset(type2));
}
}
void CheckUnordered(TypeHandle type1, TypeHandle type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
- if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_NE(Rep::AsBitset(type1), Rep::AsBitset(type2));
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK(this->AsBitset(type1) != this->AsBitset(type2));
}
}
- void CheckOverlap(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
+ void CheckOverlap(TypeHandle type1, TypeHandle type2) {
CHECK(type1->Maybe(type2));
CHECK(type2->Maybe(type1));
- if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_NE(0,
- Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
- }
}
- void CheckDisjoint(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
+ void CheckDisjoint(TypeHandle type1, TypeHandle type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
CHECK(!type1->Maybe(type2));
CHECK(!type2->Maybe(type1));
- if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_EQ(0,
- Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
+ }
+
+ void IsSomeType() {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ TypeHandle t = *it;
+ CHECK(1 ==
+ this->IsBitset(t) + t->IsClass() + t->IsConstant() + t->IsRange() +
+ this->IsUnion(t) + t->IsArray() + t->IsFunction() + t->IsContext());
}
}
@@ -402,8 +436,8 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.None));
CHECK(this->IsBitset(T.Any));
- CHECK_EQ(0, this->AsBitset(T.None));
- CHECK_EQ(-1, this->AsBitset(T.Any));
+ CHECK(bitset(0) == this->AsBitset(T.None));
+ CHECK(bitset(0xfffffffeu) == this->AsBitset(T.Any));
// Union(T1, T2) is bitset for bitsets T1,T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -445,22 +479,23 @@ struct Tests : Rep {
TypeHandle type2 = *it2;
TypeHandle union12 = T.Union(type1, type2);
if (this->IsBitset(type1) && this->IsBitset(type2)) {
- CHECK_EQ(
- this->AsBitset(type1) | this->AsBitset(type2),
+ CHECK(
+ (this->AsBitset(type1) | this->AsBitset(type2)) ==
this->AsBitset(union12));
}
}
}
- // Intersect(T1, T2) is bitwise conjunction for bitsets T1,T2
+ // Intersect(T1, T2) is bitwise conjunction for bitsets T1,T2 (modulo None)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
TypeHandle type1 = *it1;
TypeHandle type2 = *it2;
TypeHandle intersect12 = T.Intersect(type1, type2);
if (this->IsBitset(type1) && this->IsBitset(type2)) {
- CHECK_EQ(
- this->AsBitset(type1) & this->AsBitset(type2),
+ bitset bits = this->AsBitset(type1) & this->AsBitset(type2);
+ CHECK(
+ (Rep::BitsetType::IsInhabited(bits) ? bits : 0) ==
this->AsBitset(intersect12));
}
}
@@ -562,50 +597,78 @@ struct Tests : Rep {
void Range() {
// Constructor
- for (DoubleIterator i = T.doubles.begin(); i != T.doubles.end(); ++i) {
- for (DoubleIterator j = T.doubles.begin(); j != T.doubles.end(); ++j) {
- double min = T.dmin(*i, *j);
- double max = T.dmax(*i, *j);
+ for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
+ for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
+ i::Handle<i::Object> min = *i;
+ i::Handle<i::Object> max = *j;
+ if (min->Number() > max->Number()) std::swap(min, max);
TypeHandle type = T.Range(min, max);
CHECK(type->IsRange());
}
}
// Range attributes
- for (DoubleIterator i = T.doubles.begin(); i != T.doubles.end(); ++i) {
- for (DoubleIterator j = T.doubles.begin(); j != T.doubles.end(); ++j) {
- double min = T.dmin(*i, *j);
- double max = T.dmax(*i, *j);
- printf("RangeType: min, max = %f, %f\n", min, max);
+ for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
+ for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
+ i::Handle<i::Object> min = *i;
+ i::Handle<i::Object> max = *j;
+ if (min->Number() > max->Number()) std::swap(min, max);
TypeHandle type = T.Range(min, max);
- printf("RangeType: Min, Max = %f, %f\n",
- type->AsRange()->Min(), type->AsRange()->Max());
- CHECK(min == type->AsRange()->Min());
- CHECK(max == type->AsRange()->Max());
- }
- }
-
-// TODO(neis): enable once subtyping is updated.
-// // Functionality & Injectivity: Range(min1, max1) = Range(min2, max2) <=>
-// // min1 = min2 /\ max1 = max2
-// for (DoubleIterator i1 = T.doubles.begin(); i1 != T.doubles.end(); ++i1) {
-// for (DoubleIterator j1 = T.doubles.begin(); j1 != T.doubles.end(); ++j1) {
-// for (DoubleIterator i2 = T.doubles.begin();
-// i2 != T.doubles.end(); ++i2) {
-// for (DoubleIterator j2 = T.doubles.begin();
-// j2 != T.doubles.end(); ++j2) {
-// double min1 = T.dmin(*i1, *j1);
-// double max1 = T.dmax(*i1, *j1);
-// double min2 = T.dmin(*i2, *j2);
-// double max2 = T.dmax(*i2, *j2);
-// TypeHandle type1 = T.Range(min1, max1);
-// TypeHandle type2 = T.Range(min2, max2);
-// CHECK(Equal(type1, type2) ==
-// (T.deq(min1, min2) && T.deq(max1, max2)));
-// }
-// }
-// }
-// }
+ CHECK(*min == *type->AsRange()->Min());
+ CHECK(*max == *type->AsRange()->Max());
+ }
+ }
+
+ // Functionality & Injectivity:
+ // Range(min1, max1) = Range(min2, max2) <=> min1 = min2 /\ max1 = max2
+ for (ValueIterator i1 = T.integers.begin();
+ i1 != T.integers.end(); ++i1) {
+ for (ValueIterator j1 = i1;
+ j1 != T.integers.end(); ++j1) {
+ for (ValueIterator i2 = T.integers.begin();
+ i2 != T.integers.end(); ++i2) {
+ for (ValueIterator j2 = i2;
+ j2 != T.integers.end(); ++j2) {
+ i::Handle<i::Object> min1 = *i1;
+ i::Handle<i::Object> max1 = *j1;
+ i::Handle<i::Object> min2 = *i2;
+ i::Handle<i::Object> max2 = *j2;
+ if (min1->Number() > max1->Number()) std::swap(min1, max1);
+ if (min2->Number() > max2->Number()) std::swap(min2, max2);
+ TypeHandle type1 = T.Range(min1, max1);
+ TypeHandle type2 = T.Range(min2, max2);
+ CHECK(Equal(type1, type2) == (*min1 == *min2 && *max1 == *max2));
+ }
+ }
+ }
+ }
+ }
+
+ void Context() {
+ // Constructor
+ for (int i = 0; i < 20; ++i) {
+ TypeHandle type = T.Random();
+ TypeHandle context = T.Context(type);
+      CHECK(context->IsContext());
+ }
+
+ // Attributes
+ for (int i = 0; i < 20; ++i) {
+ TypeHandle type = T.Random();
+ TypeHandle context = T.Context(type);
+ CheckEqual(type, context->AsContext()->Outer());
+ }
+
+ // Functionality & Injectivity: Context(T1) = Context(T2) iff T1 = T2
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ TypeHandle type1 = T.Random();
+ TypeHandle type2 = T.Random();
+ TypeHandle context1 = T.Context(type1);
+ TypeHandle context2 = T.Context(type2);
+ CHECK(Equal(context1, context2) == Equal(type1, type2));
+ }
+ }
}
void Array() {
@@ -713,15 +776,26 @@ struct Tests : Rep {
CHECK(const_type->Is(of_type));
}
- // Constant(V)->Is(T) iff Of(V)->Is(T) or T->Maybe(Constant(V))
+ // If Of(V)->Is(T), then Constant(V)->Is(T)
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
TypeHandle type = *it;
TypeHandle const_type = T.Constant(value);
TypeHandle of_type = T.Of(value);
- CHECK(const_type->Is(type) ==
- (of_type->Is(type) || type->Maybe(const_type)));
+ CHECK(!of_type->Is(type) || const_type->Is(type));
+ }
+ }
+
+ // If Constant(V)->Is(T), then Of(V)->Is(T) or T->Maybe(Constant(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ TypeHandle type = *it;
+ TypeHandle const_type = T.Constant(value);
+ TypeHandle of_type = T.Of(value);
+ CHECK(!const_type->Is(type) ||
+ of_type->Is(type) || type->Maybe(const_type));
}
}
}
@@ -743,19 +817,32 @@ struct Tests : Rep {
CHECK(nowof_type->Is(of_type));
}
- // Constant(V)->NowIs(T) iff NowOf(V)->NowIs(T) or T->Maybe(Constant(V))
+ // If NowOf(V)->NowIs(T), then Constant(V)->NowIs(T)
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
TypeHandle type = *it;
TypeHandle const_type = T.Constant(value);
TypeHandle nowof_type = T.NowOf(value);
- CHECK(const_type->NowIs(type) ==
- (nowof_type->NowIs(type) || type->Maybe(const_type)));
+ CHECK(!nowof_type->NowIs(type) || const_type->NowIs(type));
}
}
- // Constant(V)->Is(T) implies NowOf(V)->Is(T) or T->Maybe(Constant(V))
+ // If Constant(V)->NowIs(T),
+ // then NowOf(V)->NowIs(T) or T->Maybe(Constant(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ TypeHandle type = *it;
+ TypeHandle const_type = T.Constant(value);
+ TypeHandle nowof_type = T.NowOf(value);
+ CHECK(!const_type->NowIs(type) ||
+ nowof_type->NowIs(type) || type->Maybe(const_type));
+ }
+ }
+
+ // If Constant(V)->Is(T),
+ // then NowOf(V)->Is(T) or T->Maybe(Constant(V))
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
@@ -763,23 +850,63 @@ struct Tests : Rep {
TypeHandle const_type = T.Constant(value);
TypeHandle nowof_type = T.NowOf(value);
CHECK(!const_type->Is(type) ||
- (nowof_type->Is(type) || type->Maybe(const_type)));
+ nowof_type->Is(type) || type->Maybe(const_type));
}
}
}
- void Bounds() {
- // Ordering: (T->BitsetGlb())->Is(T->BitsetLub())
+ void MinMax() {
+ // If b is regular numeric bitset, then Range(b->Min(), b->Max())->Is(b).
+ // TODO(neis): Need to ignore representation for this to be true.
+ /*
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
- TypeHandle glb =
- Rep::BitsetType::New(Rep::BitsetType::Glb(type), T.region());
- TypeHandle lub =
- Rep::BitsetType::New(Rep::BitsetType::Lub(type), T.region());
- CHECK(glb->Is(lub));
+ if (this->IsBitset(type) && type->Is(T.Number) &&
+ !type->Is(T.None) && !type->Is(T.NaN)) {
+ TypeHandle range = T.Range(
+ isolate->factory()->NewNumber(type->Min()),
+ isolate->factory()->NewNumber(type->Max()));
+ CHECK(range->Is(type));
+ }
+ }
+ */
+
+ // If b is regular numeric bitset, then b->Min() and b->Max() are integers.
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ TypeHandle type = *it;
+ if (this->IsBitset(type) && type->Is(T.Number) &&
+ !type->Is(T.None) && !type->Is(T.NaN)) {
+ CHECK(IsInteger(type->Min()) && IsInteger(type->Max()));
+ }
}
- // Lower bound: (T->BitsetGlb())->Is(T)
+ // If b1 and b2 are regular numeric bitsets with b1->Is(b2), then
+ // b1->Min() >= b2->Min() and b1->Max() <= b2->Max().
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ if (this->IsBitset(type1) && type1->Is(type2) && type2->Is(T.Number) &&
+ !type1->Is(T.NaN) && !type2->Is(T.NaN)) {
+ CHECK(type1->Min() >= type2->Min());
+ CHECK(type1->Max() <= type2->Max());
+ }
+ }
+ }
+
+ // Lub(Range(x,y))->Min() <= x and y <= Lub(Range(x,y))->Max()
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ TypeHandle type = *it;
+ if (type->IsRange()) {
+ TypeHandle lub = Rep::BitsetType::New(
+ Rep::BitsetType::Lub(type), T.region());
+ CHECK(lub->Min() <= type->Min() && type->Max() <= lub->Max());
+ }
+ }
+ }
+
+ void BitsetGlb() {
+ // Lower: (T->BitsetGlb())->Is(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
TypeHandle glb =
@@ -787,7 +914,33 @@ struct Tests : Rep {
CHECK(glb->Is(type));
}
- // Upper bound: T->Is(T->BitsetLub())
+ // Greatest: If T1->IsBitset() and T1->Is(T2), then T1->Is(T2->BitsetGlb())
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle glb2 =
+ Rep::BitsetType::New(Rep::BitsetType::Glb(type2), T.region());
+ CHECK(!this->IsBitset(type1) || !type1->Is(type2) || type1->Is(glb2));
+ }
+ }
+
+ // Monotonicity: T1->Is(T2) implies (T1->BitsetGlb())->Is(T2->BitsetGlb())
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle glb1 =
+ Rep::BitsetType::New(Rep::BitsetType::Glb(type1), T.region());
+ TypeHandle glb2 =
+ Rep::BitsetType::New(Rep::BitsetType::Glb(type2), T.region());
+ CHECK(!type1->Is(type2) || glb1->Is(glb2));
+ }
+ }
+ }
+
+ void BitsetLub() {
+ // Upper: T->Is(T->BitsetLub())
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
TypeHandle lub =
@@ -795,18 +948,32 @@ struct Tests : Rep {
CHECK(type->Is(lub));
}
- // Inherent bound: (T->BitsetLub())->Is(T->InherentBitsetLub())
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle lub =
- Rep::BitsetType::New(Rep::BitsetType::Lub(type), T.region());
- TypeHandle inherent =
- Rep::BitsetType::New(Rep::BitsetType::InherentLub(type), T.region());
- CHECK(lub->Is(inherent));
+ // Least: If T2->IsBitset() and T1->Is(T2), then (T1->BitsetLub())->Is(T2)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle lub1 =
+ Rep::BitsetType::New(Rep::BitsetType::Lub(type1), T.region());
+ CHECK(!this->IsBitset(type2) || !type1->Is(type2) || lub1->Is(type2));
+ }
+ }
+
+ // Monotonicity: T1->Is(T2) implies (T1->BitsetLub())->Is(T2->BitsetLub())
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle lub1 =
+ Rep::BitsetType::New(Rep::BitsetType::Lub(type1), T.region());
+ TypeHandle lub2 =
+ Rep::BitsetType::New(Rep::BitsetType::Lub(type2), T.region());
+ CHECK(!type1->Is(type2) || lub1->Is(lub2));
+ }
}
}
- void Is() {
+ void Is1() {
// Least Element (Bottom): None->Is(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
@@ -858,17 +1025,26 @@ struct Tests : Rep {
}
}
- // Constant(V1)->Is(Constant(V2)) iff V1 = V2
- for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
- for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
- Handle<i::Object> value1 = *vt1;
- Handle<i::Object> value2 = *vt2;
- TypeHandle const_type1 = T.Constant(value1);
- TypeHandle const_type2 = T.Constant(value2);
- CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
+ // (In-)Compatibilities.
+ for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
+ for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
+ TypeHandle type1 = *i;
+ TypeHandle type2 = *j;
+ CHECK(!type1->Is(type2) || this->IsBitset(type2) ||
+ this->IsUnion(type2) || this->IsUnion(type1) ||
+ (type1->IsClass() && type2->IsClass()) ||
+ (type1->IsConstant() && type2->IsConstant()) ||
+ (type1->IsConstant() && type2->IsRange()) ||
+ (type1->IsRange() && type2->IsRange()) ||
+ (type1->IsContext() && type2->IsContext()) ||
+ (type1->IsArray() && type2->IsArray()) ||
+ (type1->IsFunction() && type2->IsFunction()) ||
+ type1->Equals(T.None));
}
}
+ }
+ void Is2() {
// Class(M1)->Is(Class(M2)) iff M1 = M2
for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
@@ -880,29 +1056,117 @@ struct Tests : Rep {
}
}
- // Constant(V)->Is(Class(M)) never
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Map> map = *mt;
- Handle<i::Object> value = *vt;
- TypeHandle constant_type = T.Constant(value);
- TypeHandle class_type = T.Class(map);
- CHECK(!constant_type->Is(class_type));
+ // Range(X1, Y1)->Is(Range(X2, Y2)) iff X1 >= X2 /\ Y1 <= Y2
+ for (ValueIterator i1 = T.integers.begin();
+ i1 != T.integers.end(); ++i1) {
+ for (ValueIterator j1 = i1;
+ j1 != T.integers.end(); ++j1) {
+ for (ValueIterator i2 = T.integers.begin();
+ i2 != T.integers.end(); ++i2) {
+ for (ValueIterator j2 = i2;
+ j2 != T.integers.end(); ++j2) {
+ i::Handle<i::Object> min1 = *i1;
+ i::Handle<i::Object> max1 = *j1;
+ i::Handle<i::Object> min2 = *i2;
+ i::Handle<i::Object> max2 = *j2;
+ if (min1->Number() > max1->Number()) std::swap(min1, max1);
+ if (min2->Number() > max2->Number()) std::swap(min2, max2);
+ TypeHandle type1 = T.Range(min1, max1);
+ TypeHandle type2 = T.Range(min2, max2);
+ CHECK(type1->Is(type2) ==
+ (min1->Number() >= min2->Number() &&
+ max1->Number() <= max2->Number()));
+ }
+ }
}
}
- // Class(M)->Is(Constant(V)) never
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Map> map = *mt;
- Handle<i::Object> value = *vt;
- TypeHandle constant_type = T.Constant(value);
- TypeHandle class_type = T.Class(map);
- CHECK(!class_type->Is(constant_type));
+ // Constant(V1)->Is(Constant(V2)) iff V1 = V2
+ for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
+ for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
+ Handle<i::Object> value1 = *vt1;
+ Handle<i::Object> value2 = *vt2;
+ TypeHandle const_type1 = T.Constant(value1);
+ TypeHandle const_type2 = T.Constant(value2);
+ CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
}
}
- // Basic types
+ // Context(T1)->Is(Context(T2)) iff T1 = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle outer1 = *it1;
+ TypeHandle outer2 = *it2;
+ TypeHandle type1 = T.Context(outer1);
+ TypeHandle type2 = T.Context(outer2);
+ CHECK(type1->Is(type2) == outer1->Equals(outer2));
+ }
+ }
+
+ // Array(T1)->Is(Array(T2)) iff T1 = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle element1 = *it1;
+ TypeHandle element2 = *it2;
+ TypeHandle type1 = T.Array1(element1);
+ TypeHandle type2 = T.Array1(element2);
+ CHECK(type1->Is(type2) == element1->Equals(element2));
+ }
+ }
+
+ // Function0(S1, T1)->Is(Function0(S2, T2)) iff S1 = S2 and T1 = T2
+ for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
+ for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
+ TypeHandle result1 = *i;
+ TypeHandle receiver1 = *j;
+ TypeHandle type1 = T.Function0(result1, receiver1);
+ TypeHandle result2 = T.Random();
+ TypeHandle receiver2 = T.Random();
+ TypeHandle type2 = T.Function0(result2, receiver2);
+ CHECK(type1->Is(type2) ==
+ (result1->Equals(result2) && receiver1->Equals(receiver2)));
+ }
+ }
+
+
+ // Range-specific subtyping
+
+ // If IsInteger(v) then Constant(v)->Is(Range(v, v)).
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ TypeHandle type = *it;
+ if (type->IsConstant() && IsInteger(*type->AsConstant()->Value())) {
+ CHECK(type->Is(
+ T.Range(type->AsConstant()->Value(), type->AsConstant()->Value())));
+ }
+ }
+
+    // If Constant(x)->Is(Range(min,max)), then IsInteger(x) and min <= x <= max.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ if (type1->IsConstant() && type2->IsRange() && type1->Is(type2)) {
+ double x = type1->AsConstant()->Value()->Number();
+ double min = type2->AsRange()->Min()->Number();
+ double max = type2->AsRange()->Max()->Number();
+ CHECK(IsInteger(x) && min <= x && x <= max);
+ }
+ }
+ }
+
+ // Lub(Range(x,y))->Is(T.Union(T.Integral32, T.OtherNumber))
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ TypeHandle type = *it;
+ if (type->IsRange()) {
+ TypeHandle lub = Rep::BitsetType::New(
+ Rep::BitsetType::Lub(type), T.region());
+ CHECK(lub->Is(T.Union(T.Integral32, T.OtherNumber)));
+ }
+ }
+
+
+ // Subtyping between concrete basic types
+
CheckUnordered(T.Boolean, T.Null);
CheckUnordered(T.Undefined, T.Null);
CheckUnordered(T.Boolean, T.Undefined);
@@ -931,7 +1195,9 @@ struct Tests : Rep {
CheckUnordered(T.Object, T.Proxy);
CheckUnordered(T.Array, T.Function);
- // Structural types
+
+ // Subtyping between concrete structural types
+
CheckSub(T.ObjectClass, T.Object);
CheckSub(T.ArrayClass, T.Object);
CheckSub(T.ArrayClass, T.Array);
@@ -1089,16 +1355,6 @@ struct Tests : Rep {
CHECK(type->Contains(value) == const_type->Is(type));
}
}
-
- // Of(V)->Is(T) implies T->Contains(V)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- TypeHandle type = *it;
- Handle<i::Object> value = *vt;
- TypeHandle of_type = T.Of(value);
- CHECK(!of_type->Is(type) || type->Contains(value));
- }
- }
}
void NowContains() {
@@ -1130,16 +1386,6 @@ struct Tests : Rep {
CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
}
}
-
- // NowOf(V)->NowIs(T) implies T->NowContains(V)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- TypeHandle type = *it;
- Handle<i::Object> value = *vt;
- TypeHandle nowof_type = T.Of(value);
- CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
- }
- }
}
void Maybe() {
@@ -1223,6 +1469,8 @@ struct Tests : Rep {
}
// Constant(V)->Maybe(Class(M)) never
+ // This does NOT hold!
+ /*
for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Map> map = *mt;
@@ -1232,8 +1480,11 @@ struct Tests : Rep {
CHECK(!const_type->Maybe(class_type));
}
}
+ */
// Class(M)->Maybe(Constant(V)) never
+ // This does NOT hold!
+ /*
for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Map> map = *mt;
@@ -1243,67 +1494,62 @@ struct Tests : Rep {
CHECK(!class_type->Maybe(const_type));
}
}
+ */
// Basic types
- CheckDisjoint(T.Boolean, T.Null, T.Semantic);
- CheckDisjoint(T.Undefined, T.Null, T.Semantic);
- CheckDisjoint(T.Boolean, T.Undefined, T.Semantic);
-
- CheckOverlap(T.SignedSmall, T.Number, T.Semantic);
- CheckOverlap(T.NaN, T.Number, T.Semantic);
- CheckDisjoint(T.Signed32, T.NaN, T.Semantic);
-
- CheckOverlap(T.UniqueName, T.Name, T.Semantic);
- CheckOverlap(T.String, T.Name, T.Semantic);
- CheckOverlap(T.InternalizedString, T.String, T.Semantic);
- CheckOverlap(T.InternalizedString, T.UniqueName, T.Semantic);
- CheckOverlap(T.InternalizedString, T.Name, T.Semantic);
- CheckOverlap(T.Symbol, T.UniqueName, T.Semantic);
- CheckOverlap(T.Symbol, T.Name, T.Semantic);
- CheckOverlap(T.String, T.UniqueName, T.Semantic);
- CheckDisjoint(T.String, T.Symbol, T.Semantic);
- CheckDisjoint(T.InternalizedString, T.Symbol, T.Semantic);
-
- CheckOverlap(T.Object, T.Receiver, T.Semantic);
- CheckOverlap(T.Array, T.Object, T.Semantic);
- CheckOverlap(T.Function, T.Object, T.Semantic);
- CheckOverlap(T.Proxy, T.Receiver, T.Semantic);
- CheckDisjoint(T.Object, T.Proxy, T.Semantic);
- CheckDisjoint(T.Array, T.Function, T.Semantic);
+ CheckDisjoint(T.Boolean, T.Null);
+ CheckDisjoint(T.Undefined, T.Null);
+ CheckDisjoint(T.Boolean, T.Undefined);
+ CheckOverlap(T.SignedSmall, T.Number);
+ CheckOverlap(T.NaN, T.Number);
+ CheckDisjoint(T.Signed32, T.NaN);
+ CheckOverlap(T.UniqueName, T.Name);
+ CheckOverlap(T.String, T.Name);
+ CheckOverlap(T.InternalizedString, T.String);
+ CheckOverlap(T.InternalizedString, T.UniqueName);
+ CheckOverlap(T.InternalizedString, T.Name);
+ CheckOverlap(T.Symbol, T.UniqueName);
+ CheckOverlap(T.Symbol, T.Name);
+ CheckOverlap(T.String, T.UniqueName);
+ CheckDisjoint(T.String, T.Symbol);
+ CheckDisjoint(T.InternalizedString, T.Symbol);
+ CheckOverlap(T.Object, T.Receiver);
+ CheckOverlap(T.Array, T.Object);
+ CheckOverlap(T.Function, T.Object);
+ CheckOverlap(T.Proxy, T.Receiver);
+ CheckDisjoint(T.Object, T.Proxy);
+ CheckDisjoint(T.Array, T.Function);
// Structural types
- CheckOverlap(T.ObjectClass, T.Object, T.Semantic);
- CheckOverlap(T.ArrayClass, T.Object, T.Semantic);
- CheckOverlap(T.ObjectClass, T.ObjectClass, T.Semantic);
- CheckOverlap(T.ArrayClass, T.ArrayClass, T.Semantic);
- CheckDisjoint(T.ObjectClass, T.ArrayClass, T.Semantic);
-
- CheckOverlap(T.SmiConstant, T.SignedSmall, T.Semantic);
- CheckOverlap(T.SmiConstant, T.Signed32, T.Semantic);
- CheckOverlap(T.SmiConstant, T.Number, T.Semantic);
- CheckOverlap(T.ObjectConstant1, T.Object, T.Semantic);
- CheckOverlap(T.ObjectConstant2, T.Object, T.Semantic);
- CheckOverlap(T.ArrayConstant, T.Object, T.Semantic);
- CheckOverlap(T.ArrayConstant, T.Array, T.Semantic);
- CheckOverlap(T.ObjectConstant1, T.ObjectConstant1, T.Semantic);
- CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2, T.Semantic);
- CheckDisjoint(T.ObjectConstant1, T.ArrayConstant, T.Semantic);
-
- CheckDisjoint(T.ObjectConstant1, T.ObjectClass, T.Semantic);
- CheckDisjoint(T.ObjectConstant2, T.ObjectClass, T.Semantic);
- CheckDisjoint(T.ObjectConstant1, T.ArrayClass, T.Semantic);
- CheckDisjoint(T.ObjectConstant2, T.ArrayClass, T.Semantic);
- CheckDisjoint(T.ArrayConstant, T.ObjectClass, T.Semantic);
-
- CheckOverlap(T.NumberArray, T.Array, T.Semantic);
- CheckDisjoint(T.NumberArray, T.AnyArray, T.Semantic);
- CheckDisjoint(T.NumberArray, T.StringArray, T.Semantic);
-
- CheckOverlap(T.MethodFunction, T.Function, T.Semantic);
- CheckDisjoint(T.SignedFunction1, T.NumberFunction1, T.Semantic);
- CheckDisjoint(T.SignedFunction1, T.NumberFunction2, T.Semantic);
- CheckDisjoint(T.NumberFunction1, T.NumberFunction2, T.Semantic);
- CheckDisjoint(T.SignedFunction1, T.MethodFunction, T.Semantic);
+ CheckOverlap(T.ObjectClass, T.Object);
+ CheckOverlap(T.ArrayClass, T.Object);
+ CheckOverlap(T.ObjectClass, T.ObjectClass);
+ CheckOverlap(T.ArrayClass, T.ArrayClass);
+ CheckDisjoint(T.ObjectClass, T.ArrayClass);
+ CheckOverlap(T.SmiConstant, T.SignedSmall);
+ CheckOverlap(T.SmiConstant, T.Signed32);
+ CheckOverlap(T.SmiConstant, T.Number);
+ CheckOverlap(T.ObjectConstant1, T.Object);
+ CheckOverlap(T.ObjectConstant2, T.Object);
+ CheckOverlap(T.ArrayConstant, T.Object);
+ CheckOverlap(T.ArrayConstant, T.Array);
+ CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
+ CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayConstant);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
+ CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
+ CheckDisjoint(T.ArrayConstant, T.ObjectClass);
+ CheckOverlap(T.NumberArray, T.Array);
+ CheckDisjoint(T.NumberArray, T.AnyArray);
+ CheckDisjoint(T.NumberArray, T.StringArray);
+ CheckOverlap(T.MethodFunction, T.Function);
+ CheckDisjoint(T.SignedFunction1, T.NumberFunction1);
+ CheckDisjoint(T.SignedFunction1, T.NumberFunction2);
+ CheckDisjoint(T.NumberFunction1, T.NumberFunction2);
+ CheckDisjoint(T.SignedFunction1, T.MethodFunction);
+ CheckOverlap(T.ObjectConstant1, T.ObjectClass); // !!!
+ CheckOverlap(T.ObjectConstant2, T.ObjectClass); // !!!
+ CheckOverlap(T.NumberClass, T.Intersect(T.Number, T.Untagged)); // !!!
}
void Union1() {
@@ -1340,6 +1586,10 @@ struct Tests : Rep {
}
// Associativity: Union(T1, Union(T2, T3)) = Union(Union(T1, T2), T3)
+ // This does NOT hold! For example:
+ // (Unsigned32 \/ Range(0,5)) \/ Range(-5,0) = Unsigned32 \/ Range(-5,0)
+ // Unsigned32 \/ (Range(0,5) \/ Range(-5,0)) = Unsigned32 \/ Range(-5,5)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1354,6 +1604,7 @@ struct Tests : Rep {
}
}
}
+ */
// Meet: T1->Is(Union(T1, T2)) and T2->Is(Union(T1, T2))
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -1375,10 +1626,12 @@ struct Tests : Rep {
if (type1->Is(type2)) CheckEqual(union12, type2);
}
}
- }
- void Union2() {
// Monotonicity: T1->Is(T2) implies Union(T1, T3)->Is(Union(T2, T3))
+ // This does NOT hold. For example:
+ // Range(-5,-1) <= Signed32
+ // Range(-5,-1) \/ Range(1,5) = Range(-5,5) </= Signed32 \/ Range(1,5)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1391,8 +1644,16 @@ struct Tests : Rep {
}
}
}
+ */
+ }
+ void Union2() {
// Monotonicity: T1->Is(T3) and T2->Is(T3) implies Union(T1, T2)->Is(T3)
+ // This does NOT hold. For example:
+ // Range(-2^33, -2^33) <= OtherNumber
+ // Range(2^33, 2^33) <= OtherNumber
+ // Range(-2^33, 2^33) </= OtherNumber
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1404,11 +1665,14 @@ struct Tests : Rep {
}
}
}
+ */
+ }
+ void Union3() {
// Monotonicity: T1->Is(T2) or T1->Is(T3) implies T1->Is(Union(T2, T3))
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ for (TypeIterator it3 = it2; it3 != T.types.end(); ++it3) {
TypeHandle type1 = *it1;
TypeHandle type2 = *it2;
TypeHandle type3 = *it3;
@@ -1417,12 +1681,14 @@ struct Tests : Rep {
}
}
}
+ }
+ void Union4() {
// Class-class
CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array, T.Semantic);
- CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number, T.Semantic);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
+ CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
// Constant-constant
CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
@@ -1430,11 +1696,11 @@ struct Tests : Rep {
CheckUnordered(
T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
CheckOverlap(
- T.Union(T.ObjectConstant1, T.ArrayConstant), T.Array, T.Semantic);
- CheckDisjoint(
- T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number, T.Semantic);
+ T.Union(T.ObjectConstant1, T.ArrayConstant), T.Array);
CheckDisjoint(
- T.Union(T.ObjectConstant1, T.ArrayConstant), T.ObjectClass, T.Semantic);
+ T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number);
+ CheckOverlap(
+ T.Union(T.ObjectConstant1, T.ArrayConstant), T.ObjectClass); // !!!
// Bitset-array
CHECK(this->IsBitset(T.Union(T.AnyArray, T.Array)));
@@ -1442,8 +1708,8 @@ struct Tests : Rep {
CheckEqual(T.Union(T.AnyArray, T.Array), T.Array);
CheckUnordered(T.Union(T.AnyArray, T.String), T.Array);
- CheckOverlap(T.Union(T.NumberArray, T.String), T.Object, T.Semantic);
- CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number, T.Semantic);
+ CheckOverlap(T.Union(T.NumberArray, T.String), T.Object);
+ CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number);
// Bitset-function
CHECK(this->IsBitset(T.Union(T.MethodFunction, T.Function)));
@@ -1451,24 +1717,24 @@ struct Tests : Rep {
CheckEqual(T.Union(T.MethodFunction, T.Function), T.Function);
CheckUnordered(T.Union(T.NumberFunction1, T.String), T.Function);
- CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object, T.Semantic);
- CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number, T.Semantic);
+ CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object);
+ CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number);
// Bitset-class
CheckSub(
T.Union(T.ObjectClass, T.SignedSmall), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object, T.Semantic);
- CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number, T.Semantic);
+ CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
// Bitset-constant
CheckSub(
T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object, T.Semantic);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number, T.Semantic);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
// Class-constant
CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
@@ -1477,10 +1743,9 @@ struct Tests : Rep {
T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant);
CheckDisjoint(
- T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2,
- T.Semantic);
- CheckDisjoint(
- T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass, T.Semantic);
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
+ CheckOverlap(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass); // !!!
// Bitset-union
CheckSub(
@@ -1534,7 +1799,7 @@ struct Tests : Rep {
T.Union(T.Number, T.Array));
}
- void Intersect1() {
+ void Intersect() {
// Identity: Intersect(T, Any) = T
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
@@ -1569,6 +1834,12 @@ struct Tests : Rep {
// Associativity:
// Intersect(T1, Intersect(T2, T3)) = Intersect(Intersect(T1, T2), T3)
+ // This does NOT hold. For example:
+ // (Class(..stringy1..) /\ Class(..stringy2..)) /\ Constant(..string..) =
+ // None
+ // Class(..stringy1..) /\ (Class(..stringy2..) /\ Constant(..string..)) =
+ // Constant(..string..)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1583,8 +1854,15 @@ struct Tests : Rep {
}
}
}
+ */
// Join: Intersect(T1, T2)->Is(T1) and Intersect(T1, T2)->Is(T2)
+ // This does NOT hold. For example:
+ // Class(..stringy..) /\ Constant(..string..) = Constant(..string..)
+ // Currently, not even the disjunction holds:
+ // Class(Internal/TaggedPtr) /\ (Any/Untagged \/ Context(..)) =
+ // Class(Internal/TaggedPtr) \/ Context(..)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
TypeHandle type1 = *it1;
@@ -1594,6 +1872,7 @@ struct Tests : Rep {
CHECK(intersect12->Is(type2));
}
}
+ */
// Lower Boundedness: T1->Is(T2) implies Intersect(T1, T2) = T1
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -1604,10 +1883,13 @@ struct Tests : Rep {
if (type1->Is(type2)) CheckEqual(intersect12, type1);
}
}
- }
- void Intersect2() {
// Monotonicity: T1->Is(T2) implies Intersect(T1, T3)->Is(Intersect(T2, T3))
+ // This does NOT hold. For example:
+ // Class(OtherObject/TaggedPtr) <= Any/TaggedPtr
+ // Class(OtherObject/TaggedPtr) /\ Any/UntaggedInt1 = Class(..)
+ // Any/TaggedPtr /\ Any/UntaggedInt1 = None
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1620,8 +1902,14 @@ struct Tests : Rep {
}
}
}
+ */
// Monotonicity: T1->Is(T3) or T2->Is(T3) implies Intersect(T1, T2)->Is(T3)
+ // This does NOT hold. For example:
+ // Class(..stringy..) <= Class(..stringy..)
+ // Class(..stringy..) /\ Constant(..string..) = Constant(..string..)
+ // Constant(..string..) </= Class(..stringy..)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1634,6 +1922,7 @@ struct Tests : Rep {
}
}
}
+ */
// Monotonicity: T1->Is(T2) and T1->Is(T3) implies T1->Is(Intersect(T2, T3))
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -1651,16 +1940,16 @@ struct Tests : Rep {
// Bitset-class
CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
- CheckSub(T.Intersect(T.ObjectClass, T.Array), T.Representation);
- CheckSub(T.Intersect(T.ObjectClass, T.Number), T.Representation);
+ CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
+ CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
// Bitset-array
CheckEqual(T.Intersect(T.NumberArray, T.Object), T.NumberArray);
- CheckSub(T.Intersect(T.AnyArray, T.Function), T.Representation);
+ CheckEqual(T.Intersect(T.AnyArray, T.Function), T.None);
// Bitset-function
CheckEqual(T.Intersect(T.MethodFunction, T.Object), T.MethodFunction);
- CheckSub(T.Intersect(T.NumberFunction1, T.Array), T.Representation);
+ CheckEqual(T.Intersect(T.NumberFunction1, T.Array), T.None);
// Bitset-union
CheckEqual(
@@ -1671,7 +1960,7 @@ struct Tests : Rep {
->IsInhabited());
// Class-constant
- CHECK(!T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited());
+ CHECK(T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited()); // !!!
CHECK(!T.Intersect(T.ArrayClass, T.ObjectConstant2)->IsInhabited());
// Array-union
@@ -1704,8 +1993,8 @@ struct Tests : Rep {
T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
T.ArrayClass);
CHECK(
- !T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant), T.ArrayClass)
- ->IsInhabited());
+ T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant), T.ArrayClass)
+ ->IsInhabited()); // !!!
// Constant-union
CheckEqual(
@@ -1716,9 +2005,9 @@ struct Tests : Rep {
T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
T.SmiConstant);
CHECK(
- !T.Intersect(
+ T.Intersect(
T.Union(T.ArrayConstant, T.ObjectClass), T.ObjectConstant1)
- ->IsInhabited());
+ ->IsInhabited()); // !!!
// Union-union
CheckEqual(
@@ -1739,16 +2028,24 @@ struct Tests : Rep {
CheckEqual(
T.Intersect(
T.Union(
- T.Union(T.ObjectConstant2, T.ObjectConstant1), T.ArrayClass),
+ T.ArrayClass,
+ T.Union(T.ObjectConstant2, T.ObjectConstant1)),
T.Union(
T.ObjectConstant1,
T.Union(T.ArrayConstant, T.ObjectConstant2))),
- T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ T.Union(
+ T.ArrayConstant,
+ T.Union(T.ObjectConstant2, T.ObjectConstant1))); // !!!
}
- void Distributivity1() {
- // Distributivity:
+ void Distributivity() {
// Union(T1, Intersect(T2, T3)) = Intersect(Union(T1, T2), Union(T1, T3))
+ // This does NOT hold. For example:
+ // Untagged \/ (Untagged /\ Class(../Tagged)) = Untagged \/ Class(../Tagged)
+ // (Untagged \/ Untagged) /\ (Untagged \/ Class(../Tagged)) =
+ // Untagged /\ (Untagged \/ Class(../Tagged)) = Untagged
+ // because Untagged <= Untagged \/ Class(../Tagged)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1764,11 +2061,14 @@ struct Tests : Rep {
}
}
}
- }
+ */
- void Distributivity2() {
- // Distributivity:
// Intersect(T1, Union(T2, T3)) = Union(Intersect(T1, T2), Intersect(T1,T3))
+ // This does NOT hold. For example:
+ // Untagged /\ (Untagged \/ Class(../Tagged)) = Untagged
+ // (Untagged /\ Untagged) \/ (Untagged /\ Class(../Tagged)) =
+ // Untagged \/ Class(../Tagged)
+ /*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1784,6 +2084,7 @@ struct Tests : Rep {
}
}
}
+ */
}
template<class Type2, class TypeHandle2, class Region2, class Rep2>
@@ -1815,6 +2116,13 @@ typedef Tests<Type, Type*, Zone, ZoneRep> ZoneTests;
typedef Tests<HeapType, Handle<HeapType>, Isolate, HeapRep> HeapTests;
+TEST(IsSomeType) {
+ CcTest::InitializeVM();
+ ZoneTests().IsSomeType();
+ HeapTests().IsSomeType();
+}
+
+
TEST(BitsetType) {
CcTest::InitializeVM();
ZoneTests().Bitset();
@@ -1871,17 +2179,31 @@ TEST(NowOf) {
}
-TEST(Bounds) {
+TEST(BitsetGlb) {
+ CcTest::InitializeVM();
+ ZoneTests().BitsetGlb();
+ HeapTests().BitsetGlb();
+}
+
+
+TEST(BitsetLub) {
+ CcTest::InitializeVM();
+ ZoneTests().BitsetLub();
+ HeapTests().BitsetLub();
+}
+
+
+TEST(Is1) {
CcTest::InitializeVM();
- ZoneTests().Bounds();
- HeapTests().Bounds();
+ ZoneTests().Is1();
+ HeapTests().Is1();
}
-TEST(Is) {
+TEST(Is2) {
CcTest::InitializeVM();
- ZoneTests().Is();
- HeapTests().Is();
+ ZoneTests().Is2();
+ HeapTests().Is2();
}
@@ -1920,38 +2242,40 @@ TEST(Union1) {
}
+/*
TEST(Union2) {
CcTest::InitializeVM();
ZoneTests().Union2();
HeapTests().Union2();
}
+*/
-TEST(Intersect1) {
+TEST(Union3) {
CcTest::InitializeVM();
- ZoneTests().Intersect1();
- HeapTests().Intersect1();
+ ZoneTests().Union3();
+ HeapTests().Union3();
}
-TEST(Intersect2) {
+TEST(Union4) {
CcTest::InitializeVM();
- ZoneTests().Intersect2();
- HeapTests().Intersect2();
+ ZoneTests().Union4();
+ HeapTests().Union4();
}
-TEST(Distributivity1) {
+TEST(Intersect) {
CcTest::InitializeVM();
- ZoneTests().Distributivity1();
- HeapTests().Distributivity1();
+ ZoneTests().Intersect();
+ HeapTests().Intersect();
}
-TEST(Distributivity2) {
+TEST(Distributivity) {
CcTest::InitializeVM();
- ZoneTests().Distributivity2();
- HeapTests().Distributivity2();
+ ZoneTests().Distributivity();
+ HeapTests().Distributivity();
}
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index cf539305b3..9ea8b2b6a3 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -223,7 +223,7 @@ TEST(SequenceCollectorRegression) {
// TODO(svenpanne) Unconditionally test this when our infrastructure is fixed.
-#if !V8_CC_MSVC && !V8_OS_NACL
+#if !V8_OS_NACL
TEST(CPlusPlus11Features) {
struct S {
bool x;
diff --git a/deps/v8/test/compiler-unittests/DEPS b/deps/v8/test/compiler-unittests/DEPS
deleted file mode 100644
index 8aa02395f5..0000000000
--- a/deps/v8/test/compiler-unittests/DEPS
+++ /dev/null
@@ -1,6 +0,0 @@
-include_rules = [
- "+src",
- "+testing/gtest",
- "+testing/gtest-type-names.h",
- "+testing/gmock",
-]
diff --git a/deps/v8/test/compiler-unittests/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/compiler-unittests/arm/instruction-selector-arm-unittest.cc
deleted file mode 100644
index b781ac8f9f..0000000000
--- a/deps/v8/test/compiler-unittests/arm/instruction-selector-arm-unittest.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/compiler-unittests/instruction-selector-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class InstructionSelectorARMTest : public InstructionSelectorTest {};
-
-
-TARGET_TEST_F(InstructionSelectorARMTest, Int32AddP) {
- StreamBuilder m(this, kMachineWord32, kMachineWord32, kMachineWord32);
- m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
- EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/compiler-unittests/change-lowering-unittest.cc b/deps/v8/test/compiler-unittests/change-lowering-unittest.cc
deleted file mode 100644
index 68de48013c..0000000000
--- a/deps/v8/test/compiler-unittests/change-lowering-unittest.cc
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/factory.h"
-#include "test/compiler-unittests/compiler-unittests.h"
-#include "test/compiler-unittests/node-matchers.h"
-#include "testing/gtest-type-names.h"
-
-using testing::_;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <typename T>
-class ChangeLoweringTest : public CompilerTest {
- public:
- static const size_t kPointerSize = sizeof(T);
-
- explicit ChangeLoweringTest(int num_parameters = 1)
- : graph_(zone()), common_(zone()), simplified_(zone()) {
- graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
- }
- virtual ~ChangeLoweringTest() {}
-
- protected:
- Node* Parameter(int32_t index = 0) {
- return graph()->NewNode(common()->Parameter(index), graph()->start());
- }
-
- Reduction Reduce(Node* node) {
- CompilationInfo info(isolate(), zone());
- Linkage linkage(&info);
- ChangeLowering<kPointerSize> reducer(graph(), &linkage);
- return reducer.Reduce(node);
- }
-
- Graph* graph() { return &graph_; }
- Factory* factory() const { return isolate()->factory(); }
- CommonOperatorBuilder* common() { return &common_; }
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- PrintableUnique<HeapObject> true_unique() {
- return PrintableUnique<HeapObject>::CreateImmovable(
- zone(), factory()->true_value());
- }
- PrintableUnique<HeapObject> false_unique() {
- return PrintableUnique<HeapObject>::CreateImmovable(
- zone(), factory()->false_value());
- }
-
- private:
- Graph graph_;
- CommonOperatorBuilder common_;
- SimplifiedOperatorBuilder simplified_;
-};
-
-
-typedef ::testing::Types<int32_t, int64_t> ChangeLoweringTypes;
-TYPED_TEST_CASE(ChangeLoweringTest, ChangeLoweringTypes);
-
-
-TARGET_TYPED_TEST(ChangeLoweringTest, ChangeBitToBool) {
- Node* val = this->Parameter(0);
- Node* node =
- this->graph()->NewNode(this->simplified()->ChangeBitToBool(), val);
- Reduction reduction = this->Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
- EXPECT_THAT(phi, IsPhi(IsHeapConstant(this->true_unique()),
- IsHeapConstant(this->false_unique()), _));
-
- Node* merge = NodeProperties::GetControlInput(phi);
- ASSERT_EQ(IrOpcode::kMerge, merge->opcode());
-
- Node* if_true = NodeProperties::GetControlInput(merge, 0);
- ASSERT_EQ(IrOpcode::kIfTrue, if_true->opcode());
-
- Node* if_false = NodeProperties::GetControlInput(merge, 1);
- ASSERT_EQ(IrOpcode::kIfFalse, if_false->opcode());
-
- Node* branch = NodeProperties::GetControlInput(if_true);
- EXPECT_EQ(branch, NodeProperties::GetControlInput(if_false));
- EXPECT_THAT(branch, IsBranch(val, this->graph()->start()));
-}
-
-
-TARGET_TYPED_TEST(ChangeLoweringTest, StringAdd) {
- Node* node = this->graph()->NewNode(this->simplified()->StringAdd(),
- this->Parameter(0), this->Parameter(1));
- Reduction reduction = this->Reduce(node);
- EXPECT_FALSE(reduction.Changed());
-}
-
-
-class ChangeLowering32Test : public ChangeLoweringTest<int32_t> {
- public:
- virtual ~ChangeLowering32Test() {}
-};
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeBoolToBit) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- EXPECT_THAT(reduction.replacement(),
- IsWord32Equal(val, IsHeapConstant(true_unique())));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
- ASSERT_EQ(IrOpcode::kPhi, phi->opcode());
-
- Node* smi = NodeProperties::GetValueInput(phi, 1);
- ASSERT_THAT(smi, IsProjection(0, IsInt32AddWithOverflow(val, val)));
-
- Node* heap_number = NodeProperties::GetValueInput(phi, 0);
- ASSERT_EQ(IrOpcode::kCall, heap_number->opcode());
-
- Node* merge = NodeProperties::GetControlInput(phi);
- ASSERT_EQ(IrOpcode::kMerge, merge->opcode());
-
- const int32_t kValueOffset = HeapNumber::kValueOffset - kHeapObjectTag;
- EXPECT_THAT(NodeProperties::GetControlInput(merge, 0),
- IsStore(kMachineFloat64, kNoWriteBarrier, heap_number,
- IsInt32Constant(kValueOffset),
- IsChangeInt32ToFloat64(val), _, heap_number));
-
- Node* if_true = NodeProperties::GetControlInput(heap_number);
- ASSERT_EQ(IrOpcode::kIfTrue, if_true->opcode());
-
- Node* if_false = NodeProperties::GetControlInput(merge, 1);
- ASSERT_EQ(IrOpcode::kIfFalse, if_false->opcode());
-
- Node* branch = NodeProperties::GetControlInput(if_true);
- EXPECT_EQ(branch, NodeProperties::GetControlInput(if_false));
- EXPECT_THAT(branch,
- IsBranch(IsProjection(1, IsInt32AddWithOverflow(val, val)),
- graph()->start()));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- const int32_t kShiftAmount =
- kSmiTagSize + SmiTagging<kPointerSize>::kSmiShiftSize;
- const int32_t kValueOffset = HeapNumber::kValueOffset - kHeapObjectTag;
- Node* phi = reduction.replacement();
- ASSERT_THAT(
- phi, IsPhi(IsLoad(kMachineFloat64, val, IsInt32Constant(kValueOffset), _),
- IsChangeInt32ToFloat64(
- IsWord32Sar(val, IsInt32Constant(kShiftAmount))),
- _));
-
- Node* merge = NodeProperties::GetControlInput(phi);
- ASSERT_EQ(IrOpcode::kMerge, merge->opcode());
-
- Node* if_true = NodeProperties::GetControlInput(merge, 0);
- ASSERT_EQ(IrOpcode::kIfTrue, if_true->opcode());
-
- Node* if_false = NodeProperties::GetControlInput(merge, 1);
- ASSERT_EQ(IrOpcode::kIfFalse, if_false->opcode());
-
- Node* branch = NodeProperties::GetControlInput(if_true);
- EXPECT_EQ(branch, NodeProperties::GetControlInput(if_false));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- EXPECT_THAT(branch, IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start()));
-}
-
-
-class ChangeLowering64Test : public ChangeLoweringTest<int64_t> {
- public:
- virtual ~ChangeLowering64Test() {}
-};
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeBoolToBit) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- EXPECT_THAT(reduction.replacement(),
- IsWord64Equal(val, IsHeapConstant(true_unique())));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- const int32_t kShiftAmount =
- kSmiTagSize + SmiTagging<kPointerSize>::kSmiShiftSize;
- EXPECT_THAT(reduction.replacement(),
- IsWord64Shl(val, IsInt32Constant(kShiftAmount)));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- const int32_t kShiftAmount =
- kSmiTagSize + SmiTagging<kPointerSize>::kSmiShiftSize;
- const int32_t kValueOffset = HeapNumber::kValueOffset - kHeapObjectTag;
- Node* phi = reduction.replacement();
- ASSERT_THAT(
- phi, IsPhi(IsLoad(kMachineFloat64, val, IsInt32Constant(kValueOffset), _),
- IsChangeInt32ToFloat64(IsConvertInt64ToInt32(
- IsWord64Sar(val, IsInt32Constant(kShiftAmount)))),
- _));
-
- Node* merge = NodeProperties::GetControlInput(phi);
- ASSERT_EQ(IrOpcode::kMerge, merge->opcode());
-
- Node* if_true = NodeProperties::GetControlInput(merge, 0);
- ASSERT_EQ(IrOpcode::kIfTrue, if_true->opcode());
-
- Node* if_false = NodeProperties::GetControlInput(merge, 1);
- ASSERT_EQ(IrOpcode::kIfFalse, if_false->opcode());
-
- Node* branch = NodeProperties::GetControlInput(if_true);
- EXPECT_EQ(branch, NodeProperties::GetControlInput(if_false));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- EXPECT_THAT(branch, IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start()));
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/compiler-unittests/compiler-unittests.cc b/deps/v8/test/compiler-unittests/compiler-unittests.cc
deleted file mode 100644
index 2ce4c93ee2..0000000000
--- a/deps/v8/test/compiler-unittests/compiler-unittests.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/libplatform/libplatform.h"
-#include "test/compiler-unittests/compiler-unittests.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-using testing::IsNull;
-using testing::NotNull;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// static
-v8::Isolate* CompilerTest::isolate_ = NULL;
-
-
-CompilerTest::CompilerTest()
- : isolate_scope_(isolate_),
- handle_scope_(isolate_),
- context_scope_(v8::Context::New(isolate_)),
- zone_(isolate()) {}
-
-
-CompilerTest::~CompilerTest() {}
-
-
-// static
-void CompilerTest::SetUpTestCase() {
- Test::SetUpTestCase();
- EXPECT_THAT(isolate_, IsNull());
- isolate_ = v8::Isolate::New();
- ASSERT_THAT(isolate_, NotNull());
-}
-
-
-// static
-void CompilerTest::TearDownTestCase() {
- ASSERT_THAT(isolate_, NotNull());
- isolate_->Dispose();
- isolate_ = NULL;
- Test::TearDownTestCase();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-
-namespace {
-
-class CompilerTestEnvironment V8_FINAL : public ::testing::Environment {
- public:
- CompilerTestEnvironment() : platform_(NULL) {}
- ~CompilerTestEnvironment() {}
-
- virtual void SetUp() V8_OVERRIDE {
- EXPECT_THAT(platform_, IsNull());
- platform_ = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform_);
- ASSERT_TRUE(v8::V8::Initialize());
- }
-
- virtual void TearDown() V8_OVERRIDE {
- ASSERT_THAT(platform_, NotNull());
- v8::V8::Dispose();
- v8::V8::ShutdownPlatform();
- delete platform_;
- platform_ = NULL;
- }
-
- private:
- v8::Platform* platform_;
-};
-
-}
-
-
-int main(int argc, char** argv) {
- testing::InitGoogleMock(&argc, argv);
- testing::AddGlobalTestEnvironment(new CompilerTestEnvironment);
- v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
- return RUN_ALL_TESTS();
-}
diff --git a/deps/v8/test/compiler-unittests/instruction-selector-unittest.cc b/deps/v8/test/compiler-unittests/instruction-selector-unittest.cc
deleted file mode 100644
index 70186529af..0000000000
--- a/deps/v8/test/compiler-unittests/instruction-selector-unittest.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/compiler-unittests/instruction-selector-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
- InstructionSelector::Features features,
- InstructionSelectorTest::StreamBuilderMode mode) {
- Schedule* schedule = Export();
- EXPECT_NE(0, graph()->NodeCount());
- CompilationInfo info(test_->isolate(), test_->zone());
- Linkage linkage(&info, call_descriptor());
- InstructionSequence sequence(&linkage, graph(), schedule);
- SourcePositionTable source_position_table(graph());
- InstructionSelector selector(&sequence, &source_position_table, features);
- selector.SelectInstructions();
- if (FLAG_trace_turbo) {
- OFStream out(stdout);
- out << "--- Code sequence after instruction selection ---" << endl
- << sequence;
- }
- Stream s;
- for (InstructionSequence::const_iterator i = sequence.begin();
- i != sequence.end(); ++i) {
- Instruction* instr = *i;
- if (instr->opcode() < 0) continue;
- if (mode == kTargetInstructions) {
- switch (instr->arch_opcode()) {
-#define CASE(Name) \
- case k##Name: \
- break;
- TARGET_ARCH_OPCODE_LIST(CASE)
-#undef CASE
- default:
- continue;
- }
- }
- for (size_t i = 0; i < instr->OutputCount(); ++i) {
- InstructionOperand* output = instr->OutputAt(i);
- EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
- if (output->IsConstant()) {
- s.constants_.insert(std::make_pair(
- output->index(), sequence.GetConstant(output->index())));
- }
- }
- for (size_t i = 0; i < instr->InputCount(); ++i) {
- InstructionOperand* input = instr->InputAt(i);
- EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
- if (input->IsImmediate()) {
- s.immediates_.insert(std::make_pair(
- input->index(), sequence.GetImmediate(input->index())));
- }
- }
- s.instructions_.push_back(instr);
- }
- return s;
-}
-
-
-TARGET_TEST_F(InstructionSelectorTest, ReturnP) {
- StreamBuilder m(this, kMachineWord32, kMachineWord32);
- m.Return(m.Parameter(0));
- Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kArchNop, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(kArchRet, s[1]->arch_opcode());
- EXPECT_EQ(1U, s[1]->InputCount());
-}
-
-
-TARGET_TEST_F(InstructionSelectorTest, ReturnImm) {
- StreamBuilder m(this, kMachineWord32);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
- EXPECT_EQ(kArchNop, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
- EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
- EXPECT_EQ(kArchRet, s[1]->arch_opcode());
- EXPECT_EQ(1U, s[1]->InputCount());
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/compiler-unittests/instruction-selector-unittest.h b/deps/v8/test/compiler-unittests/instruction-selector-unittest.h
deleted file mode 100644
index 3a7b590757..0000000000
--- a/deps/v8/test/compiler-unittests/instruction-selector-unittest.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_UNITTESTS_INSTRUCTION_SELECTOR_UNITTEST_H_
-#define V8_COMPILER_UNITTESTS_INSTRUCTION_SELECTOR_UNITTEST_H_
-
-#include <deque>
-
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "test/compiler-unittests/compiler-unittests.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class InstructionSelectorTest : public CompilerTest {
- public:
- InstructionSelectorTest() {}
- virtual ~InstructionSelectorTest() {}
-
- protected:
- class Stream;
-
- enum StreamBuilderMode { kAllInstructions, kTargetInstructions };
-
- class StreamBuilder V8_FINAL : public RawMachineAssembler {
- public:
- StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
- : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
- CallDescriptorBuilder(test->zone(), return_type)),
- test_(test) {}
- StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
- MachineType parameter0_type)
- : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
- CallDescriptorBuilder(test->zone(), return_type,
- parameter0_type)),
- test_(test) {}
- StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
- MachineType parameter0_type, MachineType parameter1_type)
- : RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
- CallDescriptorBuilder(test->zone(), return_type, parameter0_type,
- parameter1_type)),
- test_(test) {}
-
- Stream Build(CpuFeature feature) {
- return Build(InstructionSelector::Features(feature));
- }
- Stream Build(CpuFeature feature1, CpuFeature feature2) {
- return Build(InstructionSelector::Features(feature1, feature2));
- }
- Stream Build(StreamBuilderMode mode = kTargetInstructions) {
- return Build(InstructionSelector::Features(), mode);
- }
- Stream Build(InstructionSelector::Features features,
- StreamBuilderMode mode = kTargetInstructions);
-
- private:
- MachineCallDescriptorBuilder* CallDescriptorBuilder(
- Zone* zone, MachineType return_type) {
- return new (zone) MachineCallDescriptorBuilder(return_type, 0, NULL);
- }
-
- MachineCallDescriptorBuilder* CallDescriptorBuilder(
- Zone* zone, MachineType return_type, MachineType parameter0_type) {
- MachineType* parameter_types = zone->NewArray<MachineType>(1);
- parameter_types[0] = parameter0_type;
- return new (zone)
- MachineCallDescriptorBuilder(return_type, 1, parameter_types);
- }
-
- MachineCallDescriptorBuilder* CallDescriptorBuilder(
- Zone* zone, MachineType return_type, MachineType parameter0_type,
- MachineType parameter1_type) {
- MachineType* parameter_types = zone->NewArray<MachineType>(2);
- parameter_types[0] = parameter0_type;
- parameter_types[1] = parameter1_type;
- return new (zone)
- MachineCallDescriptorBuilder(return_type, 2, parameter_types);
- }
-
- private:
- InstructionSelectorTest* test_;
- };
-
- class Stream V8_FINAL {
- public:
- size_t size() const { return instructions_.size(); }
- const Instruction* operator[](size_t index) const {
- EXPECT_LT(index, size());
- return instructions_[index];
- }
-
- int32_t ToInt32(const InstructionOperand* operand) const {
- return ToConstant(operand).ToInt32();
- }
-
- private:
- Constant ToConstant(const InstructionOperand* operand) const {
- ConstantMap::const_iterator i;
- if (operand->IsConstant()) {
- i = constants_.find(operand->index());
- EXPECT_NE(constants_.end(), i);
- } else {
- EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
- i = immediates_.find(operand->index());
- EXPECT_NE(immediates_.end(), i);
- }
- EXPECT_EQ(operand->index(), i->first);
- return i->second;
- }
-
- friend class StreamBuilder;
-
- typedef std::map<int, Constant> ConstantMap;
-
- ConstantMap constants_;
- ConstantMap immediates_;
- std::deque<Instruction*> instructions_;
- };
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_UNITTESTS_INSTRUCTION_SELECTOR_UNITTEST_H_
diff --git a/deps/v8/test/compiler-unittests/node-matchers.h b/deps/v8/test/compiler-unittests/node-matchers.h
deleted file mode 100644
index 09da07a7f5..0000000000
--- a/deps/v8/test/compiler-unittests/node-matchers.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_UNITTESTS_NODE_MATCHERS_H_
-#define V8_COMPILER_UNITTESTS_NODE_MATCHERS_H_
-
-#include "src/compiler/machine-operator.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HeapObject;
-template <class T>
-class PrintableUnique;
-
-namespace compiler {
-
-// Forward declarations.
-class Node;
-
-using testing::Matcher;
-
-Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsHeapConstant(
- const Matcher<PrintableUnique<HeapObject> >& value_matcher);
-Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
-Matcher<Node*> IsPhi(const Matcher<Node*>& value0_matcher,
- const Matcher<Node*>& value1_matcher,
- const Matcher<Node*>& merge_matcher);
-Matcher<Node*> IsProjection(const Matcher<int32_t>& index_matcher,
- const Matcher<Node*>& base_matcher);
-
-Matcher<Node*> IsLoad(const Matcher<MachineType>& type_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& effect_matcher);
-Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
- const Matcher<WriteBarrierKind>& write_barrier_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsConvertInt64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_UNITTESTS_NODE_MATCHERS_H_
diff --git a/deps/v8/test/compiler-unittests/testcfg.py b/deps/v8/test/compiler-unittests/testcfg.py
deleted file mode 100644
index 4eec956f7e..0000000000
--- a/deps/v8/test/compiler-unittests/testcfg.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import shutil
-
-from testrunner.local import commands
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.objects import testcase
-
-
-class CompilerUnitTestsSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(CompilerUnitTestsSuite, self).__init__(name, root)
-
- def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
- if utils.IsWindows():
- shell += ".exe"
- output = commands.Execute(context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags)
- if output.exit_code != 0:
- print output.stdout
- print output.stderr
- return []
- tests = []
- test_case = ''
- for test_desc in output.stdout.strip().split():
- if test_desc.endswith('.'):
- test_case = test_desc
- else:
- test = testcase.TestCase(self, test_case + test_desc, dependency=None)
- tests.append(test)
- tests.sort()
- return tests
-
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + ["--gtest_filter=" + testcase.path] +
- ["--gtest_random_seed=%s" % context.random_seed] +
- ["--gtest_print_time=0"] +
- context.mode_flags)
-
- def shell(self):
- return "compiler-unittests"
-
-
-def GetSuite(name, root):
- return CompilerUnitTestsSuite(name, root)
diff --git a/deps/v8/test/fuzz-natives/fuzz-natives.status b/deps/v8/test/fuzz-natives/fuzz-natives.status
index 7165c3845a..c81188aef8 100644
--- a/deps/v8/test/fuzz-natives/fuzz-natives.status
+++ b/deps/v8/test/fuzz-natives/fuzz-natives.status
@@ -33,7 +33,7 @@
# TODO(danno): Fix these internal function that are only callable form stubs
# and un-blacklist them!
- "CompileUnoptimized": [SKIP],
+ "CompileLazy": [SKIP],
"NotifyDeoptimized": [SKIP],
"NotifyStubFailure": [SKIP],
"NewSloppyArguments": [SKIP],
diff --git a/deps/v8/test/heap-unittests/heap-unittests.status b/deps/v8/test/heap-unittests/heap-unittests.status
new file mode 100644
index 0000000000..d439913ccf
--- /dev/null
+++ b/deps/v8/test/heap-unittests/heap-unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 007943a323..d48d695165 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -25,10 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# The following tests use getDefaultTimeZone().
[
[ALWAYS, {
+ # The following tests use getDefaultTimeZone().
'date-format/resolved-options': [FAIL],
'date-format/timezone': [FAIL],
'general/v8Intl-exists': [FAIL],
diff --git a/deps/v8/test/libplatform-unittests/libplatform-unittests.status b/deps/v8/test/libplatform-unittests/libplatform-unittests.status
new file mode 100644
index 0000000000..d439913ccf
--- /dev/null
+++ b/deps/v8/test/libplatform-unittests/libplatform-unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 00f6e34720..234bf0f35c 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -25,10 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# All tests in the bug directory are expected to fail.
[
[ALWAYS, {
+ # All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
}], # ALWAYS
]
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index 3fa623a656..62755426ad 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -404,3 +404,47 @@ function cmpTest(a, b) {
return a.val - b.val;
}
arr.sort(cmpTest);
+
+function TestSortDoesNotDependOnObjectPrototypeHasOwnProperty() {
+ Array.prototype.sort.call({
+ __proto__: { hasOwnProperty: null, 0: 1 },
+ length: 5
+ });
+
+ var arr = new Array(2);
+ Object.defineProperty(arr, 0, { get: function() {}, set: function() {} });
+ arr.hasOwnProperty = null;
+ arr.sort();
+}
+
+TestSortDoesNotDependOnObjectPrototypeHasOwnProperty();
+
+function TestSortDoesNotDependOnArrayPrototypePush() {
+ // InsertionSort is used for arrays which length <= 22
+ var arr = [];
+ for (var i = 0; i < 22; i++) arr[i] = {};
+ Array.prototype.push = function() {
+ fail('Should not call push');
+ };
+ arr.sort();
+
+ // Quicksort is used for arrays which length > 22
+ // Arrays which length > 1000 guarantee GetThirdIndex is executed
+ arr = [];
+ for (var i = 0; i < 2000; ++i) arr[i] = {};
+ arr.sort();
+}
+
+TestSortDoesNotDependOnArrayPrototypePush();
+
+function TestSortDoesNotDependOnArrayPrototypeSort() {
+ var arr = [];
+ for (var i = 0; i < 2000; i++) arr[i] = {};
+ var sortfn = Array.prototype.sort;
+ Array.prototype.sort = function() {
+ fail('Should not call sort');
+ };
+ sortfn.call(arr);
+}
+
+TestSortDoesNotDependOnArrayPrototypeSort();
diff --git a/deps/v8/test/mjsunit/asm/int32array-unaligned.js b/deps/v8/test/mjsunit/asm/int32array-unaligned.js
new file mode 100644
index 0000000000..698ec5e312
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/int32array-unaligned.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, foreign, heap) {
+ "use asm";
+ var MEM32 = new stdlib.Int32Array(heap);
+ function load(i) {
+ i = i|0;
+ i = MEM32[i >> 2] | 0;
+ return i;
+ }
+ function store(i, v) {
+ i = i|0;
+ v = v|0;
+ MEM32[i >> 2] = v;
+ }
+ return { load: load, store: store };
+}
+
+var m = Module(this, {}, new ArrayBuffer(1024));
+
+m.store(0, 0x12345678);
+m.store(4, -1);
+m.store(8, -1);
+for (var i = 0; i < 4; ++i) {
+ assertEquals(0x12345678, m.load(i));
+}
+for (var i = 4; i < 12; ++i) {
+ assertEquals(-1, m.load(i));
+}
+for (var j = 4; j < 8; ++j) {
+ m.store(j, 0x11223344);
+ for (var i = 0; i < 4; ++i) {
+ assertEquals(0x12345678, m.load(i));
+ }
+ for (var i = 4; i < 8; ++i) {
+ assertEquals(0x11223344, m.load(i));
+ }
+ for (var i = 8; i < 12; ++i) {
+ assertEquals(-1, m.load(i));
+ }
+}
diff --git a/deps/v8/test/mjsunit/asm/math-abs.js b/deps/v8/test/mjsunit/asm/math-abs.js
new file mode 100644
index 0000000000..6387749e03
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/math-abs.js
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+
+ var abs = stdlib.Math.abs;
+
+ // f: double -> double
+ function f(a) {
+ a = +a;
+ return +abs(a);
+ }
+
+ // g: unsigned -> double
+ function g(a) {
+ a = a>>>0;
+ return +abs(a);
+ }
+
+ // h: signed -> double
+ function h(a) {
+ a = a|0;
+ return +abs(a);
+ }
+
+ return { f: f, g: g, h: h };
+}
+
+var m = Module({ Math: Math });
+var f = m.f;
+var g = m.g;
+var h = m.h;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals("Infinity", String(1/f(0)));
+assertEquals("Infinity", String(1/f(-0)));
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("Infinity", String(f(-Infinity)));
+
+assertEquals(0, f(0));
+assertEquals(0.1, f(0.1));
+assertEquals(0.5, f(0.5));
+assertEquals(0.1, f(-0.1));
+assertEquals(0.5, f(-0.5));
+assertEquals(1, f(1));
+assertEquals(1.1, f(1.1));
+assertEquals(1.5, f(1.5));
+assertEquals(1, f(-1));
+assertEquals(1.1, f(-1.1));
+assertEquals(1.5, f(-1.5));
+
+assertEquals(0, g(0));
+assertEquals(0, g(0.1));
+assertEquals(0, g(0.5));
+assertEquals(0, g(-0.1));
+assertEquals(0, g(-0.5));
+assertEquals(1, g(1));
+assertEquals(1, g(1.1));
+assertEquals(1, g(1.5));
+assertEquals(4294967295, g(-1));
+assertEquals(4294967295, g(-1.1));
+assertEquals(4294967295, g(-1.5));
+
+assertEquals(0, h(0));
+assertEquals(0, h(0.1));
+assertEquals(0, h(0.5));
+assertEquals(0, h(-0.1));
+assertEquals(0, h(-0.5));
+assertEquals(1, h(1));
+assertEquals(1, h(1.1));
+assertEquals(1, h(1.5));
+assertEquals(1, h(-1));
+assertEquals(1, h(-1.1));
+assertEquals(1, h(-1.5));
+
+assertEquals(Number.MIN_VALUE, f(Number.MIN_VALUE));
+assertEquals(Number.MIN_VALUE, f(-Number.MIN_VALUE));
+assertEquals(Number.MAX_VALUE, f(Number.MAX_VALUE));
+assertEquals(Number.MAX_VALUE, f(-Number.MAX_VALUE));
diff --git a/deps/v8/test/mjsunit/asm/math-fround.js b/deps/v8/test/mjsunit/asm/math-fround.js
new file mode 100644
index 0000000000..b1d37e904e
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/math-fround.js
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+
+ var fround = stdlib.Math.fround;
+
+ // f: double -> float
+ function f(a) {
+ a = +a;
+ return fround(a);
+ }
+
+ return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals("Infinity", String(1/f(0)));
+assertEquals("-Infinity", String(1/f(-0)));
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
+assertEquals("Infinity", String(f(1E200)));
+assertEquals("-Infinity", String(f(-1E200)));
+assertEquals("Infinity", String(1/f(1E-300)));
+assertEquals("-Infinity", String(1/f(-1E-300)));
+
+assertEquals(0, f(0));
+assertEquals(1, f(1));
+assertEquals(1.5, f(1.5));
+assertEquals(1.3370000123977661, f(1.337));
+assertEquals(-4.300000190734863, f(-4.3));
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call.js b/deps/v8/test/mjsunit/compiler/opt-next-call.js
new file mode 100644
index 0000000000..6366c7d72e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return "fooed";
+}
+
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("fooed", foo());
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/osr-warm.js b/deps/v8/test/mjsunit/compiler/osr-warm.js
index 65ada1e114..73e1fd5cd2 100644
--- a/deps/v8/test/mjsunit/compiler/osr-warm.js
+++ b/deps/v8/test/mjsunit/compiler/osr-warm.js
@@ -35,7 +35,7 @@ function f1(x) {
}
assertEquals(0, f1(1));
-assertEquals(0, f1(10000000));
+assertEquals(0, f1(200000));
function f2(x) {
var sum = 1;
@@ -47,4 +47,4 @@ function f2(x) {
}
assertEquals(2, f2(1));
-assertEquals(10000001, f2(10000000));
+assertEquals(200001, f2(200000));
diff --git a/deps/v8/test/cctest/test-libplatform-worker-thread.cc b/deps/v8/test/mjsunit/compiler/regress-411262.js
index ba6b51fd02..ffbfe2e823 100644
--- a/deps/v8/test/cctest/test-libplatform-worker-thread.cc
+++ b/deps/v8/test/mjsunit/compiler/regress-411262.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,41 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+// Flags: --allow-natives-syntax
-#include "src/libplatform/task-queue.h"
-#include "src/libplatform/worker-thread.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-libplatform.h"
-
-using namespace v8::internal;
-using namespace v8::platform;
-
-
-TEST(WorkerThread) {
- TaskQueue queue;
- TaskCounter task_counter;
-
- TestTask* task1 = new TestTask(&task_counter, true);
- TestTask* task2 = new TestTask(&task_counter, true);
- TestTask* task3 = new TestTask(&task_counter, true);
- TestTask* task4 = new TestTask(&task_counter, true);
-
- WorkerThread* thread1 = new WorkerThread(&queue);
- WorkerThread* thread2 = new WorkerThread(&queue);
-
- CHECK_EQ(4, task_counter.GetCount());
-
- queue.Append(task1);
- queue.Append(task2);
- queue.Append(task3);
- queue.Append(task4);
-
- // TaskQueue DCHECKs that it is empty in its destructor.
- queue.Terminate();
-
- delete thread1;
- delete thread2;
-
- CHECK_EQ(0, task_counter.GetCount());
+function b() {
+}
+function f() {
+ b.apply(this, arguments);
}
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/shift-shr.js b/deps/v8/test/mjsunit/compiler/shift-shr.js
new file mode 100644
index 0000000000..a300b2a5c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/shift-shr.js
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --noopt-safe-uint32-operations
+
+// Check the results of `left >>> right`. The result is always unsigned (and
+// therefore positive).
+function test_shr(left) {
+ var errors = 0;
+
+ for (var i = 1; i < 1024; i++) {
+ var temp = left >>> i;
+ if (temp < 0) {
+ errors++;
+ }
+ }
+
+ return errors;
+}
+
+assertEquals(0, test_shr(1));
+%OptimizeFunctionOnNextCall(test_shr);
+for (var i = 5; i >= -5; i--) {
+ assertEquals(0, test_shr(i));
+}
diff --git a/deps/v8/test/mjsunit/cross-realm-filtering.js b/deps/v8/test/mjsunit/cross-realm-filtering.js
index 902cceb58f..9523e8cc1a 100644
--- a/deps/v8/test/mjsunit/cross-realm-filtering.js
+++ b/deps/v8/test/mjsunit/cross-realm-filtering.js
@@ -70,72 +70,3 @@ assertSame(Realm.shared.caller_1, Realm.shared.result_1);
Realm.eval(realms[0], script);
assertSame(Realm.shared.caller_0, Realm.shared.result_0);
assertSame(null, Realm.shared.result_1);
-
-
-// Check function constructor.
-var ctor_script = "Function.constructor";
-var ctor_a_script =
- "(function() { return Function.constructor.apply(this, ['return 1;']); })";
-var ctor_b_script = "Function.constructor.bind(this, 'return 1;')";
-var ctor_c_script =
- "(function() { return Function.constructor.call(this, 'return 1;'); })";
-Realm.shared = {
- ctor_0 : Realm.eval(realms[0], ctor_script),
- ctor_1 : Realm.eval(realms[1], ctor_script),
- ctor_a_0 : Realm.eval(realms[0], ctor_a_script),
- ctor_a_1 : Realm.eval(realms[1], ctor_a_script),
- ctor_b_0 : Realm.eval(realms[0], ctor_b_script),
- ctor_b_1 : Realm.eval(realms[1], ctor_b_script),
- ctor_c_0 : Realm.eval(realms[0], ctor_c_script),
- ctor_c_1 : Realm.eval(realms[1], ctor_c_script),
-}
-
-var script_0 = " \
- var ctor_0 = Realm.shared.ctor_0; \
- Realm.shared.direct_0 = ctor_0('return 1'); \
- Realm.shared.indirect_0 = (function() { return ctor_0('return 1;'); })(); \
- Realm.shared.apply_0 = ctor_0.apply(this, ['return 1']); \
- Realm.shared.bind_0 = ctor_0.bind(this, 'return 1')(); \
- Realm.shared.call_0 = ctor_0.call(this, 'return 1'); \
- Realm.shared.a_0 = Realm.shared.ctor_a_0(); \
- Realm.shared.b_0 = Realm.shared.ctor_b_0(); \
- Realm.shared.c_0 = Realm.shared.ctor_c_0(); \
-";
-
-script = script_0 + script_0.replace(/_0/g, "_1");
-
-Realm.eval(realms[0], script);
-assertSame(1, Realm.shared.direct_0());
-assertSame(1, Realm.shared.indirect_0());
-assertSame(1, Realm.shared.apply_0());
-assertSame(1, Realm.shared.bind_0());
-assertSame(1, Realm.shared.call_0());
-assertSame(1, Realm.shared.a_0());
-assertSame(1, Realm.shared.b_0());
-assertSame(1, Realm.shared.c_0());
-assertSame(undefined, Realm.shared.direct_1);
-assertSame(undefined, Realm.shared.indirect_1);
-assertSame(undefined, Realm.shared.apply_1);
-assertSame(undefined, Realm.shared.bind_1);
-assertSame(undefined, Realm.shared.call_1);
-assertSame(1, Realm.shared.a_1());
-assertSame(undefined, Realm.shared.b_1);
-assertSame(1, Realm.shared.c_1());
-
-Realm.eval(realms[1], script);
-assertSame(undefined, Realm.shared.direct_0);
-assertSame(undefined, Realm.shared.indirect_0);
-assertSame(undefined, Realm.shared.apply_0);
-assertSame(undefined, Realm.shared.bind_0);
-assertSame(undefined, Realm.shared.call_0);
-assertSame(1, Realm.shared.a_0());
-assertSame(undefined, Realm.shared.b_0);
-assertSame(1, Realm.shared.c_1());
-assertSame(1, Realm.shared.direct_1());
-assertSame(1, Realm.shared.indirect_1());
-assertSame(1, Realm.shared.apply_1());
-assertSame(1, Realm.shared.bind_1());
-assertSame(1, Realm.shared.call_1());
-assertSame(1, Realm.shared.a_1());
-assertSame(1, Realm.shared.b_1());
-assertSame(1, Realm.shared.c_1());
diff --git a/deps/v8/test/mjsunit/debug-backtrace-text.js b/deps/v8/test/mjsunit/debug-backtrace-text.js
index 61648fa4e2..3bfaeb0dad 100644
--- a/deps/v8/test/mjsunit/debug-backtrace-text.js
+++ b/deps/v8/test/mjsunit/debug-backtrace-text.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
// The functions used for testing backtraces.
function Point(x, y) {
diff --git a/deps/v8/test/mjsunit/debug-break-inline.js b/deps/v8/test/mjsunit/debug-break-inline.js
index 4418fa8d1b..3ef4d4eafe 100644
--- a/deps/v8/test/mjsunit/debug-break-inline.js
+++ b/deps/v8/test/mjsunit/debug-break-inline.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --turbo-deoptimization
// This test tests that deoptimization due to debug breaks works for
// inlined functions where the full-code is generated before the
diff --git a/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js b/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
index 0cfc5c9583..137dfecbec 100644
--- a/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
+++ b/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
// Get the Debug object exposed from the debug context global object.
var Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-evaluate-arguments.js b/deps/v8/test/mjsunit/debug-evaluate-arguments.js
index 92b745f1da..9765f19370 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-arguments.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-arguments.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-evaluate-closure.js b/deps/v8/test/mjsunit/debug-evaluate-closure.js
index 778defd0ab..cf507b57d2 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-closure.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-closure.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --turbo-deoptimization
Debug = debug.Debug;
var listened = false;
diff --git a/deps/v8/test/mjsunit/debug-evaluate-with.js b/deps/v8/test/mjsunit/debug-evaluate-with.js
index c19a707432..3f3310f9f5 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-with.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-with.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
+
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-receiver.js b/deps/v8/test/mjsunit/debug-receiver.js
index 21cdde84a2..2d5d2e08de 100644
--- a/deps/v8/test/mjsunit/debug-receiver.js
+++ b/deps/v8/test/mjsunit/debug-receiver.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index ce37d24023..4823496e3c 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --turbo-deoptimization
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 5b5e75962f..5ffada11ba 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -59,7 +59,7 @@ for (i = 0; i < scripts.length; i++) {
}
// This has to be updated if the number of native scripts change.
-assertTrue(named_native_count == 25 || named_native_count == 26);
+assertTrue(named_native_count == 26 || named_native_count == 27);
// Only the 'gc' extension is loaded.
assertEquals(1, extension_count);
// This script and mjsunit.js has been loaded. If using d8, d8 loads
diff --git a/deps/v8/test/mjsunit/debug-step-2.js b/deps/v8/test/mjsunit/debug-step-2.js
index 502b426ee2..5fe7466cb7 100644
--- a/deps/v8/test/mjsunit/debug-step-2.js
+++ b/deps/v8/test/mjsunit/debug-step-2.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
// This test tests that full code compiled without debug break slots
// is recompiled with debug break slots when debugging is started.
diff --git a/deps/v8/test/mjsunit/debug-stepin-property-function-call.js b/deps/v8/test/mjsunit/debug-stepin-property-function-call.js
new file mode 100644
index 0000000000..081fb24fb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-stepin-property-function-call.js
@@ -0,0 +1,153 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --nocrankshaft
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+
+// Simple debug event handler which first time will cause 'step in' action
+// to get into g.call and than check that execution is stopped inside
+// function 'g'.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+ state = 2;
+ } else if (state == 2) {
+ assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0,
+ "source line: \"" + event_data.sourceLineText() + "\"");
+ state = 3;
+ }
+ }
+ } catch(e) {
+ print("Exception: " + e);
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var count = 0;
+var obj = {
+ fun: function() {
+ ++count;
+ return count; // Expected to step
+ }
+};
+obj.fun2 = obj.fun;
+
+function testCall_Dots() {
+ debugger;
+ obj.fun();
+}
+
+function testCall_Quotes() {
+ debugger;
+ obj["fun"]();
+}
+
+function testCall_Call() {
+ debugger;
+ obj.fun.call(obj);
+}
+
+function testCall_Apply() {
+ debugger;
+ obj.fun.apply(obj);
+}
+
+function testCall_Variable() {
+ var functionName = "fun";
+ debugger;
+ obj[functionName]();
+}
+
+function testCall_Fun2() {
+ debugger;
+ obj.fun2();
+}
+
+function testCall_InternStrings() {
+ var cache = { "fun": "fun" };
+ var functionName = "fu" + "n";
+ debugger;
+ obj[cache[functionName]]();
+}
+
+function testCall_ViaFunRef() {
+ var functionName = "fu" + "n";
+ var funRef = obj[functionName];
+ debugger;
+ funRef();
+}
+
+// bug 2888
+function testCall_RuntimeVariable1() {
+ var functionName = "fu" + "n";
+ debugger;
+ obj[functionName]();
+}
+
+// bug 2888
+function testCall_RuntimeVariable2() {
+ var functionName = "un".replace(/u/, "fu");
+ debugger;
+ obj[functionName]();
+}
+
+// bug 2888
+function testCall_RuntimeVariable3() {
+ var expr = "fu" + "n";
+ const functionName = expr;
+ assertEquals("fun", functionName);
+ debugger;
+ obj[functionName]();
+}
+
+var functionsCalled = 0;
+for (var n in this) {
+ if (n.substr(0, 4) != 'test' || typeof this[n] !== "function") {
+ continue;
+ }
+ state = 1;
+ print("Running " + n + "...");
+ this[n]();
+ ++functionsCalled;
+ assertNull(exception, n);
+ assertEquals(3, state, n);
+ assertEquals(functionsCalled, count, n);
+}
+
+assertEquals(11, functionsCalled);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/deopt-global-accessor.js b/deps/v8/test/mjsunit/deopt-global-accessor.js
new file mode 100644
index 0000000000..5c544a0fa0
--- /dev/null
+++ b/deps/v8/test/mjsunit/deopt-global-accessor.js
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+x = 1;
+x = 2;
+x = 3;
+
+function f() {
+ return x;
+}
+
+f();
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+Object.defineProperty(this, "x", {get:function() { return 100; }});
+
+assertEquals(100, f());
diff --git a/deps/v8/test/mjsunit/es6/arguments-iterator.js b/deps/v8/test/mjsunit/es6/arguments-iterator.js
new file mode 100644
index 0000000000..a65bf8bad5
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/arguments-iterator.js
@@ -0,0 +1,230 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+// Note in general that "arguments.foo" and "var o = arguments; o.foo"
+// are treated differently by full-codegen, and so both cases need to be
+// tested.
+
+function TestDirectArgumentsIteratorProperty() {
+ assertTrue(arguments.hasOwnProperty(Symbol.iterator));
+ assertFalse(arguments.propertyIsEnumerable(Symbol.iterator));
+ var descriptor = Object.getOwnPropertyDescriptor(arguments, Symbol.iterator);
+ assertTrue(descriptor.writable);
+ assertFalse(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+ assertEquals(descriptor.value, [].values);
+ assertEquals(arguments[Symbol.iterator], [].values);
+}
+TestDirectArgumentsIteratorProperty();
+
+
+function TestIndirectArgumentsIteratorProperty() {
+ var o = arguments;
+ assertTrue(o.hasOwnProperty(Symbol.iterator));
+ assertFalse(o.propertyIsEnumerable(Symbol.iterator));
+ assertEquals(o[Symbol.iterator], [].values);
+}
+TestIndirectArgumentsIteratorProperty();
+
+
+function assertIteratorResult(value, done, result) {
+ assertEquals({value: value, done: done}, result);
+}
+
+
+function TestDirectValues1(a, b, c) {
+ var iterator = arguments[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+ assertIteratorResult(c, false, iterator.next());
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues1(1, 2, 3);
+
+
+function TestIndirectValues1(a, b, c) {
+ var args = arguments;
+ var iterator = args[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+ assertIteratorResult(c, false, iterator.next());
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues1(1, 2, 3);
+
+
+function TestDirectValues2(a, b, c) {
+ var iterator = arguments[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+ assertIteratorResult(c, false, iterator.next());
+ assertIteratorResult(undefined, true, iterator.next());
+
+ arguments[3] = 4;
+ arguments.length = 4;
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues2(1, 2, 3);
+
+
+function TestIndirectValues2(a, b, c) {
+ var args = arguments;
+ var iterator = args[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+ assertIteratorResult(c, false, iterator.next());
+ assertIteratorResult(undefined, true, iterator.next());
+
+ arguments[3] = 4;
+ arguments.length = 4;
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues2(1, 2, 3);
+
+
+function TestDirectValues3(a, b, c) {
+ var iterator = arguments[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+
+ arguments.length = 2;
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues3(1, 2, 3);
+
+
+function TestIndirectValues3(a, b, c) {
+ var args = arguments;
+ var iterator = args[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+
+ arguments.length = 2;
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues3(1, 2, 3);
+
+
+function TestDirectValues4(a, b, c) {
+ var iterator = arguments[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+ assertIteratorResult(c, false, iterator.next());
+
+ arguments.length = 4;
+ assertIteratorResult(undefined, false, iterator.next());
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues4(1, 2, 3);
+
+
+function TestIndirectValues4(a, b, c) {
+ var args = arguments;
+ var iterator = args[Symbol.iterator]();
+ assertIteratorResult(a, false, iterator.next());
+ assertIteratorResult(b, false, iterator.next());
+ assertIteratorResult(c, false, iterator.next());
+
+ arguments.length = 4;
+ assertIteratorResult(undefined, false, iterator.next());
+ assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues4(1, 2, 3);
+
+
+function TestForOf() {
+ var i = 0;
+ for (var value of arguments) {
+ assertEquals(arguments[i++], value);
+ }
+
+ assertEquals(arguments.length, i);
+}
+TestForOf(1, 2, 3, 4, 5);
+
+
+function TestAssignmentToIterator() {
+ var i = 0;
+ arguments[Symbol.iterator] = [].entries;
+ for (var entry of arguments) {
+ assertEquals([i, arguments[i]], entry);
+ i++;
+ }
+
+ assertEquals(arguments.length, i);
+}
+TestAssignmentToIterator(1, 2, 3, 4, 5);
+
+
+function TestArgumentsMutation() {
+ var i = 0;
+ for (var x of arguments) {
+ assertEquals(arguments[i], x);
+ arguments[i+1] *= 2;
+ i++;
+ }
+
+ assertEquals(arguments.length, i);
+}
+TestArgumentsMutation(1, 2, 3, 4, 5);
+
+
+function TestSloppyArgumentsAliasing(a0, a1, a2, a3, a4) {
+ var i = 0;
+ for (var x of arguments) {
+ assertEquals(arguments[i], x);
+ a0 = a1; a1 = a2; a3 = a4;
+ i++;
+ }
+
+ assertEquals(arguments.length, i);
+}
+TestSloppyArgumentsAliasing(1, 2, 3, 4, 5);
+
+
+function TestStrictArgumentsAliasing(a0, a1, a2, a3, a4) {
+ "use strict";
+ var i = 0;
+ for (var x of arguments) {
+ a0 = a1; a1 = a2; a3 = a4;
+ assertEquals(arguments[i], x);
+ i++;
+ }
+
+ assertEquals(arguments.length, i);
+}
+TestStrictArgumentsAliasing(1, 2, 3, 4, 5);
+
+
+function TestArgumentsAsProto() {
+ "use strict";
+
+ var o = {__proto__:arguments};
+ assertSame([].values, o[Symbol.iterator]);
+ // Make o dict-mode.
+ %OptimizeObjectForAddingMultipleProperties(o, 0);
+ assertFalse(o.hasOwnProperty(Symbol.iterator));
+ assertSame([].values, o[Symbol.iterator]);
+ o[Symbol.iterator] = 10;
+ assertTrue(o.hasOwnProperty(Symbol.iterator));
+ assertEquals(10, o[Symbol.iterator]);
+ assertSame([].values, arguments[Symbol.iterator]);
+
+ // Frozen o.
+ o = Object.freeze({__proto__:arguments});
+ assertSame([].values, o[Symbol.iterator]);
+ assertFalse(o.hasOwnProperty(Symbol.iterator));
+ assertSame([].values, o[Symbol.iterator]);
+ // This should throw, but currently it doesn't, because
+ // ExecutableAccessorInfo callbacks don't see the current strict mode.
+ // See note in accessors.cc:SetPropertyOnInstanceIfInherited.
+ o[Symbol.iterator] = 10;
+ assertFalse(o.hasOwnProperty(Symbol.iterator));
+ assertEquals([].values, o[Symbol.iterator]);
+ assertSame([].values, arguments[Symbol.iterator]);
+}
+TestArgumentsAsProto();
diff --git a/deps/v8/test/mjsunit/es6/array-iterator.js b/deps/v8/test/mjsunit/es6/array-iterator.js
index 63a7415b96..b24ee5712f 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator.js
@@ -175,10 +175,9 @@ function TestForArrayValues() {
assertEquals(8, buffer.length);
- for (var i = 0; i < buffer.length - 1; i++) {
+ for (var i = 0; i < buffer.length; i++) {
assertSame(array[i], buffer[i]);
}
- assertTrue(isNaN(buffer[buffer.length - 1]));
}
TestForArrayValues();
@@ -210,10 +209,9 @@ function TestForArrayEntries() {
assertEquals(8, buffer.length);
- for (var i = 0; i < buffer.length - 1; i++) {
+ for (var i = 0; i < buffer.length; i++) {
assertSame(array[i], buffer[i][1]);
}
- assertTrue(isNaN(buffer[buffer.length - 1][1]));
for (var i = 0; i < buffer.length; i++) {
assertEquals(i, buffer[i][0]);
@@ -232,10 +230,9 @@ function TestForArray() {
assertEquals(8, buffer.length);
- for (var i = 0; i < buffer.length - 1; i++) {
+ for (var i = 0; i < buffer.length; i++) {
assertSame(array[i], buffer[i]);
}
- assertTrue(isNaN(buffer[buffer.length - 1]));
}
TestForArrayValues();
diff --git a/deps/v8/test/mjsunit/es6/collections.js b/deps/v8/test/mjsunit/es6/collections.js
index 1e2f232ee8..940c0b9d1f 100644
--- a/deps/v8/test/mjsunit/es6/collections.js
+++ b/deps/v8/test/mjsunit/es6/collections.js
@@ -117,7 +117,8 @@ function TestMapBehavior2(m) {
TestMapping(m, i / 10, new Object);
TestMapping(m, 'key-' + i, new Object);
}
- var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+ // -0 is handled in TestMinusZeroMap
+ var keys = [ 0, +Infinity, -Infinity, true, false, null, undefined ];
for (var i = 0; i < keys.length; i++) {
TestMapping(m, keys[i], new Object);
}
@@ -495,24 +496,26 @@ for (var i = 9; i >= 0; i--) {
(function TestMinusZeroSet() {
- var m = new Set();
- m.add(0);
- m.add(-0);
- assertEquals(1, m.size);
- assertTrue(m.has(0));
- assertTrue(m.has(-0));
+ var s = new Set();
+ s.add(-0);
+ assertSame(0, s.values().next().value);
+ s.add(0);
+ assertEquals(1, s.size);
+ assertTrue(s.has(0));
+ assertTrue(s.has(-0));
})();
(function TestMinusZeroMap() {
var m = new Map();
- m.set(0, 'plus');
m.set(-0, 'minus');
+ assertSame(0, m.keys().next().value);
+ m.set(0, 'plus');
assertEquals(1, m.size);
assertTrue(m.has(0));
assertTrue(m.has(-0));
- assertEquals('minus', m.get(0));
- assertEquals('minus', m.get(-0));
+ assertEquals('plus', m.get(0));
+ assertEquals('plus', m.get(-0));
})();
@@ -1015,6 +1018,9 @@ function TestSetConstructor(ctor) {
assertThrows(function() {
new ctor({});
}, TypeError);
+ assertThrows(function() {
+ new ctor(true);
+ }, TypeError);
// @@iterator not callable
assertThrows(function() {
@@ -1141,6 +1147,46 @@ TestSetConstructorNextNotAnObject(Set);
TestSetConstructorNextNotAnObject(WeakSet);
+(function TestWeakSetConstructorNonObjectKeys() {
+ assertThrows(function() {
+ new WeakSet([1]);
+ }, TypeError);
+})();
+
+
+function TestSetConstructorIterableValue(ctor) {
+ 'use strict';
+ // Strict mode is required to prevent implicit wrapping in the getter.
+ Object.defineProperty(Number.prototype, Symbol.iterator, {
+ get: function() {
+ assertEquals('object', typeof this);
+ return function() {
+ return oneAndTwo.keys();
+ };
+ },
+ configurable: true
+ });
+
+ var set = new ctor(42);
+ assertSize(2, set);
+ assertTrue(set.has(k1));
+ assertTrue(set.has(k2));
+
+ delete Number.prototype[Symbol.iterator];
+}
+TestSetConstructorIterableValue(Set);
+TestSetConstructorIterableValue(WeakSet);
+
+
+(function TestSetConstructorStringValue() {
+ var s = new Set('abc');
+ assertSize(3, s);
+ assertTrue(s.has('a'));
+ assertTrue(s.has('b'));
+ assertTrue(s.has('c'));
+})();
+
+
function TestMapConstructor(ctor) {
var m = new ctor(null);
assertSize(0, m);
@@ -1152,6 +1198,9 @@ function TestMapConstructor(ctor) {
assertThrows(function() {
new ctor({});
}, TypeError);
+ assertThrows(function() {
+ new ctor(true);
+ }, TypeError);
// @@iterator not callable
assertThrows(function() {
@@ -1286,3 +1335,34 @@ function TestMapConstructorIteratorNotObjectValues(ctor) {
}
TestMapConstructorIteratorNotObjectValues(Map);
TestMapConstructorIteratorNotObjectValues(WeakMap);
+
+
+(function TestWeakMapConstructorNonObjectKeys() {
+ assertThrows(function() {
+ new WeakMap([[1, 2]])
+ }, TypeError);
+})();
+
+
+function TestMapConstructorIterableValue(ctor) {
+ 'use strict';
+ // Strict mode is required to prevent implicit wrapping in the getter.
+ Object.defineProperty(Number.prototype, Symbol.iterator, {
+ get: function() {
+ assertEquals('object', typeof this);
+ return function() {
+ return oneAndTwo.entries();
+ };
+ },
+ configurable: true
+ });
+
+ var map = new ctor(42);
+ assertSize(2, map);
+ assertEquals(1, map.get(k1));
+ assertEquals(2, map.get(k2));
+
+ delete Number.prototype[Symbol.iterator];
+}
+TestMapConstructorIterableValue(Map);
+TestMapConstructorIterableValue(WeakMap);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
new file mode 100644
index 0000000000..63151df016
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is only a default reject handler for the to-be-rejected Promise.
+// We expect two Exception debug events:
+// - when the first Promise is rejected and only has default reject handlers.
+// - when the default reject handler passes the rejection on.
+
+Debug = debug.Debug;
+
+var expected_events = 2;
+var log = [];
+
+var resolve, reject;
+var p0 = new Promise(function(res, rej) { resolve = res; reject = rej; });
+var p1 = p0.then(function() {
+ log.push("p0.then");
+ return Promise.reject(new Error("123"));
+});
+var p2 = p1.then(function() {
+ log.push("p1.then");
+});
+
+var q = new Promise(function(res, rej) {
+ log.push("resolve q");
+ res();
+});
+
+q.then(function() {
+ log.push("resolve p");
+ resolve();
+})
+
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Exception) {
+ expected_events--;
+ assertTrue(expected_events >= 0);
+ assertTrue(event_data.uncaught());
+ assertTrue(event_data.promise() instanceof Promise);
+ if (expected_events == 1) {
+ // p1 is rejected, uncaught except for its default reject handler.
+ assertEquals(0, exec_state.frameCount());
+ assertSame(p1, event_data.promise());
+ } else {
+ // p2 is rejected by p1's default reject handler.
+ assertEquals(0, exec_state.frameCount());
+ assertSame(p2, event_data.promise());
+ }
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+ function checkResult() {
+ try {
+ assertTrue(iteration < 10);
+ if (expected_events === 0) {
+ assertEquals(["resolve q", "end main", "resolve p", "p0.then"], log);
+ } else {
+ testDone(iteration + 1);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+ }
+
+ // Run testDone through the Object.observe processing loop.
+ var dummy = {};
+ Object.observe(dummy, checkResult);
+ dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
new file mode 100644
index 0000000000..36b5565e5f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is only a default reject handler for the to-be-rejected Promise.
+// We expect two Exception debug events:
+// - when the first Promise is rejected and only has default reject handlers.
+// - when the default reject handler passes the rejection on.
+
+Debug = debug.Debug;
+
+var expected_events = 2;
+var log = [];
+
+var resolve, reject;
+var p0 = new Promise(function(res, rej) { resolve = res; reject = rej; });
+var p1 = p0.then(function() {
+ log.push("p0.then");
+ throw new Error("123"); // event
+});
+var p2 = p1.then(function() {
+ log.push("p1.then");
+});
+
+var q = new Promise(function(res, rej) {
+ log.push("resolve q");
+ res();
+});
+
+q.then(function() {
+ log.push("resolve p");
+ resolve();
+})
+
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Exception) {
+ expected_events--;
+ assertTrue(expected_events >= 0);
+ assertTrue(event_data.uncaught());
+ assertTrue(event_data.promise() instanceof Promise);
+ if (expected_events == 1) {
+ // p1 is rejected, uncaught except for its default reject handler.
+ assertTrue(
+ exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+ assertSame(p1, event_data.promise());
+ } else {
+ // p2 is rejected by p1's default reject handler.
+ assertEquals(0, exec_state.frameCount());
+ assertSame(p2, event_data.promise());
+ }
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+ function checkResult() {
+ try {
+ assertTrue(iteration < 10);
+ if (expected_events === 0) {
+ assertEquals(["resolve q", "end main", "resolve p", "p0.then"], log);
+ } else {
+ testDone(iteration + 1);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+ }
+
+ // Run testDone through the Object.observe processing loop.
+ var dummy = {};
+ Object.observe(dummy, checkResult);
+ dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js
new file mode 100644
index 0000000000..19610f77e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is a catch handler for the to-be-rejected Promise.
+// We expect no Exception debug events, since the default reject handler passes
+// the rejection on to a user-defined reject handler.
+
+Debug = debug.Debug;
+
+var resolve, reject;
+var p0 = new Promise(function(res, rej) { resolve = res; reject = rej; });
+
+var p1 = p0.then(function() {
+ throw new Error();
+});
+
+var p2 = p1.then(function() { });
+var p3 = p2.catch(function() { });
+
+var q = new Promise(function(res, rej) {
+ res();
+});
+
+q.then(function() {
+ resolve();
+})
+
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ assertTrue(event != Debug.DebugEvent.Exception);
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-generators.js b/deps/v8/test/mjsunit/es6/debug-stepin-generators.js
new file mode 100644
index 0000000000..f48c5ef75f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-generators.js
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+var exception = null;
+var yields = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var source = exec_state.frame(0).sourceLineText();
+ print(source);
+ if (/stop stepping/.test(source)) return;
+ if (/yield/.test(source)) yields++;
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ } catch (e) {
+ print(e, e.stack);
+ exception = e;
+ }
+};
+
+Debug.setListener(listener);
+
+function* g() {
+ for (var i = 0; i < 3; ++i) {
+ yield i;
+ }
+}
+
+var i = g();
+debugger;
+for (var num of g()) {}
+i.next();
+
+print(); // stop stepping
+
+// Not stepped into.
+i.next();
+i.next();
+
+assertNull(exception);
+assertEquals(4, yields);
diff --git a/deps/v8/test/mjsunit/harmony/generators-debug-liveedit.js b/deps/v8/test/mjsunit/es6/generators-debug-liveedit.js
index 341ef483c5..6f0c443afc 100644
--- a/deps/v8/test/mjsunit/harmony/generators-debug-liveedit.js
+++ b/deps/v8/test/mjsunit/es6/generators-debug-liveedit.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-generators
+// Flags: --expose-debug-as debug
var Debug = debug.Debug;
var LiveEdit = Debug.LiveEdit;
diff --git a/deps/v8/test/mjsunit/harmony/generators-debug-scopes.js b/deps/v8/test/mjsunit/es6/generators-debug-scopes.js
index ad0ea53de5..d55e5612de 100644
--- a/deps/v8/test/mjsunit/harmony/generators-debug-scopes.js
+++ b/deps/v8/test/mjsunit/es6/generators-debug-scopes.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-generators
+// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/harmony/generators-iteration.js b/deps/v8/test/mjsunit/es6/generators-iteration.js
index 1a793678d9..b6fcdaa487 100644
--- a/deps/v8/test/mjsunit/harmony/generators-iteration.js
+++ b/deps/v8/test/mjsunit/es6/generators-iteration.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-generators --expose-gc
+// Flags: --expose-gc
// Test generator iteration.
diff --git a/deps/v8/test/mjsunit/es6/generators-mirror.js b/deps/v8/test/mjsunit/es6/generators-mirror.js
new file mode 100644
index 0000000000..6925285882
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/generators-mirror.js
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+// Test the mirror object for functions.
+
+function *generator(f) {
+ "use strict";
+ yield;
+ f();
+ yield;
+}
+
+function MirrorRefCache(json_refs) {
+ var tmp = eval('(' + json_refs + ')');
+ this.refs_ = [];
+ for (var i = 0; i < tmp.length; i++) {
+ this.refs_[tmp[i].handle] = tmp[i];
+ }
+}
+
+MirrorRefCache.prototype.lookup = function(handle) {
+ return this.refs_[handle];
+}
+
+function TestGeneratorMirror(g, test) {
+ // Create mirror and JSON representation.
+ var mirror = debug.MakeMirror(g);
+ var serializer = debug.MakeMirrorSerializer();
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
+
+ // Check the mirror hierachy.
+ assertTrue(mirror instanceof debug.Mirror);
+ assertTrue(mirror instanceof debug.ValueMirror);
+ assertTrue(mirror instanceof debug.ObjectMirror);
+ assertTrue(mirror instanceof debug.GeneratorMirror);
+
+ // Check the mirror properties.
+ assertTrue(mirror.isGenerator());
+ assertEquals('generator', mirror.type());
+ assertFalse(mirror.isPrimitive());
+ assertEquals('Generator', mirror.className());
+
+ assertTrue(mirror.receiver().isUndefined());
+ assertEquals(generator, mirror.func().value());
+
+ test(mirror);
+}
+
+var iter = generator(function () {
+ assertEquals('running', debug.MakeMirror(iter).status());
+})
+
+// Note that line numbers are 0-based, not 1-based.
+function assertSourceLocation(loc, line, column) {
+ assertEquals(line, loc.line);
+ assertEquals(column, loc.column);
+}
+
+TestGeneratorMirror(iter, function (mirror) {
+ assertEquals('suspended', mirror.status())
+ assertSourceLocation(mirror.sourceLocation(), 7, 19);
+});
+
+iter.next();
+TestGeneratorMirror(iter, function (mirror) {
+ assertEquals('suspended', mirror.status())
+ assertSourceLocation(mirror.sourceLocation(), 9, 2);
+});
+
+iter.next();
+TestGeneratorMirror(iter, function (mirror) {
+ assertEquals('suspended', mirror.status())
+ assertSourceLocation(mirror.sourceLocation(), 11, 2);
+});
+
+iter.next();
+TestGeneratorMirror(iter, function (mirror) {
+ assertEquals('closed', mirror.status())
+ assertEquals(undefined, mirror.sourceLocation());
+});
diff --git a/deps/v8/test/mjsunit/harmony/generators-objects.js b/deps/v8/test/mjsunit/es6/generators-objects.js
index c1cda07db4..8a052ff5e6 100644
--- a/deps/v8/test/mjsunit/harmony/generators-objects.js
+++ b/deps/v8/test/mjsunit/es6/generators-objects.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-generators --harmony-scoping --allow-natives-syntax
+// Flags: --harmony-scoping --allow-natives-syntax
// Test instantations of generators.
diff --git a/deps/v8/test/mjsunit/harmony/generators-parsing.js b/deps/v8/test/mjsunit/es6/generators-parsing.js
index 21790b0e13..e4408365d3 100644
--- a/deps/v8/test/mjsunit/harmony/generators-parsing.js
+++ b/deps/v8/test/mjsunit/es6/generators-parsing.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-generators
-
// Test basic generator syntax.
// Yield statements.
diff --git a/deps/v8/test/mjsunit/harmony/generators-poisoned-properties.js b/deps/v8/test/mjsunit/es6/generators-poisoned-properties.js
index 39a583ec97..44d823a503 100644
--- a/deps/v8/test/mjsunit/harmony/generators-poisoned-properties.js
+++ b/deps/v8/test/mjsunit/es6/generators-poisoned-properties.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-generators
-
function assertIteratorResult(value, done, result) {
assertEquals({value: value, done: done}, result);
}
diff --git a/deps/v8/test/mjsunit/harmony/generators-relocation.js b/deps/v8/test/mjsunit/es6/generators-relocation.js
index 4074235c82..6babb148be 100644
--- a/deps/v8/test/mjsunit/harmony/generators-relocation.js
+++ b/deps/v8/test/mjsunit/es6/generators-relocation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-generators
+// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/harmony/generators-runtime.js b/deps/v8/test/mjsunit/es6/generators-runtime.js
index 9fb7075492..8fa70b62e0 100644
--- a/deps/v8/test/mjsunit/harmony/generators-runtime.js
+++ b/deps/v8/test/mjsunit/es6/generators-runtime.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-generators
-
// Test aspects of the generator runtime.
// See:
diff --git a/deps/v8/test/mjsunit/es6/iteration-semantics.js b/deps/v8/test/mjsunit/es6/iteration-semantics.js
index 7849b29abe..544c94d915 100644
--- a/deps/v8/test/mjsunit/es6/iteration-semantics.js
+++ b/deps/v8/test/mjsunit/es6/iteration-semantics.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-generators --harmony-scoping --harmony-proxies
+// Flags: --harmony-scoping --harmony-proxies
// Test for-of semantics.
diff --git a/deps/v8/test/mjsunit/es6/math-expm1.js b/deps/v8/test/mjsunit/es6/math-expm1.js
index b4e31a959b..7cbb1b485f 100644
--- a/deps/v8/test/mjsunit/es6/math-expm1.js
+++ b/deps/v8/test/mjsunit/es6/math-expm1.js
@@ -8,19 +8,22 @@ assertTrue(isNaN(Math.expm1(NaN)));
assertTrue(isNaN(Math.expm1(function() {})));
assertTrue(isNaN(Math.expm1({ toString: function() { return NaN; } })));
assertTrue(isNaN(Math.expm1({ valueOf: function() { return "abc"; } })));
-assertEquals("Infinity", String(1/Math.expm1(0)));
-assertEquals("-Infinity", String(1/Math.expm1(-0)));
-assertEquals("Infinity", String(Math.expm1(Infinity)));
+assertEquals(Infinity, 1/Math.expm1(0));
+assertEquals(-Infinity, 1/Math.expm1(-0));
+assertEquals(Infinity, Math.expm1(Infinity));
assertEquals(-1, Math.expm1(-Infinity));
-for (var x = 0.1; x < 700; x += 0.1) {
+
+// Sanity check:
+// Math.expm1(x) stays reasonably close to Math.exp(x) - 1 for large values.
+for (var x = 1; x < 700; x += 0.25) {
var expected = Math.exp(x) - 1;
- assertEqualsDelta(expected, Math.expm1(x), expected * 1E-14);
+ assertEqualsDelta(expected, Math.expm1(x), expected * 1E-15);
expected = Math.exp(-x) - 1;
- assertEqualsDelta(expected, Math.expm1(-x), -expected * 1E-14);
+ assertEqualsDelta(expected, Math.expm1(-x), -expected * 1E-15);
}
-// Values close to 0:
+// Approximation for values close to 0:
// Use six terms of Taylor expansion at 0 for exp(x) as test expectation:
// exp(x) - 1 == exp(0) + exp(0) * x + x * x / 2 + ... - 1
// == x + x * x / 2 + x * x * x / 6 + ...
@@ -32,7 +35,44 @@ function expm1(x) {
1/362880 + x * (1/3628800))))))))));
}
+// Sanity check:
+// Math.expm1(x) stays reasonably close to the Taylor series for small values.
for (var x = 1E-1; x > 1E-300; x *= 0.8) {
var expected = expm1(x);
- assertEqualsDelta(expected, Math.expm1(x), expected * 1E-14);
+ assertEqualsDelta(expected, Math.expm1(x), expected * 1E-15);
}
+
+
+// Tests related to the fdlibm implementation.
+// Test overflow.
+assertEquals(Infinity, Math.expm1(709.8));
+// Test largest double value.
+assertEquals(Infinity, Math.exp(1.7976931348623157e308));
+// Cover various code paths.
+assertEquals(-1, Math.expm1(-56 * Math.LN2));
+assertEquals(-1, Math.expm1(-50));
+// Test most negative double value.
+assertEquals(-1, Math.expm1(-1.7976931348623157e308));
+// Test argument reduction.
+// Cases for 0.5*log(2) < |x| < 1.5*log(2).
+assertEquals(Math.E - 1, Math.expm1(1));
+assertEquals(1/Math.E - 1, Math.expm1(-1));
+// Cases for 1.5*log(2) < |x|.
+assertEquals(6.38905609893065, Math.expm1(2));
+assertEquals(-0.8646647167633873, Math.expm1(-2));
+// Cases where Math.expm1(x) = x.
+assertEquals(0, Math.expm1(0));
+assertEquals(Math.pow(2,-55), Math.expm1(Math.pow(2,-55)));
+// Tests for the case where argument reduction has x in the primary range.
+// Test branch for k = 0.
+assertEquals(0.18920711500272105, Math.expm1(0.25 * Math.LN2));
+// Test branch for k = -1.
+assertEquals(-0.5, Math.expm1(-Math.LN2));
+// Test branch for k = 1.
+assertEquals(1, Math.expm1(Math.LN2));
+// Test branch for k <= -2 || k > 56. k = -3.
+assertEquals(1.4411518807585582e17, Math.expm1(57 * Math.LN2));
+// Test last branch for k < 20, k = 19.
+assertEquals(524286.99999999994, Math.expm1(19 * Math.LN2));
+// Test the else branch, k = 20.
+assertEquals(1048575, Math.expm1(20 * Math.LN2));
diff --git a/deps/v8/test/mjsunit/es6/math-hyperbolic.js b/deps/v8/test/mjsunit/es6/math-hyperbolic.js
index 1ceb95182b..8970f6ef65 100644
--- a/deps/v8/test/mjsunit/es6/math-hyperbolic.js
+++ b/deps/v8/test/mjsunit/es6/math-hyperbolic.js
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(3468): we rely on a precise Math.exp.
+// Flags: --no-fast-math
+
[Math.sinh, Math.cosh, Math.tanh, Math.asinh, Math.acosh, Math.atanh].
forEach(function(fun) {
assertTrue(isNaN(fun(NaN)));
@@ -66,14 +69,14 @@ function test_id(fun, rev, value) {
});
-assertEquals("Infinity", String(Math.cosh(-Infinity)));
-assertEquals("Infinity", String(Math.cosh(Infinity)));
-assertEquals("Infinity", String(Math.cosh("-Infinity")));
-assertEquals("Infinity", String(Math.cosh("Infinity")));
+assertEquals(Infinity, Math.cosh(-Infinity));
+assertEquals(Infinity, Math.cosh(Infinity));
+assertEquals(Infinity, Math.cosh("-Infinity"));
+assertEquals(Infinity, Math.cosh("Infinity"));
-assertEquals("-Infinity", String(Math.atanh(-1)));
-assertEquals("Infinity", String(Math.atanh(1)));
+assertEquals(-Infinity, Math.atanh(-1));
+assertEquals(Infinity, Math.atanh(1));
// Math.atanh(x) is NaN for |x| > 1 and NaN
[1.000000000001, Math.PI, 10000000, 2, Infinity, NaN].forEach(function(x) {
@@ -82,6 +85,8 @@ assertEquals("Infinity", String(Math.atanh(1)));
});
+assertEquals(0, Math.sinh(0));
+assertEquals(-Infinity, 1/Math.sinh(-0));
assertEquals(1, Math.tanh(Infinity));
assertEquals(-1, Math.tanh(-Infinity));
assertEquals(1, Math.cosh(0));
@@ -97,15 +102,13 @@ assertEquals("Infinity", String(Math.acosh(Infinity)));
// Some random samples.
-assertEqualsDelta(0.5210953054937, Math.sinh(0.5), 1E-12);
-assertEqualsDelta(74.203210577788, Math.sinh(5), 1E-12);
-assertEqualsDelta(-0.5210953054937, Math.sinh(-0.5), 1E-12);
-assertEqualsDelta(-74.203210577788, Math.sinh(-5), 1E-12);
+assertEqualsDelta(74.20321057778875, Math.sinh(5), 1E-12);
+assertEqualsDelta(-74.20321057778875, Math.sinh(-5), 1E-12);
-assertEqualsDelta(1.1276259652063, Math.cosh(0.5), 1E-12);
-assertEqualsDelta(74.209948524787, Math.cosh(5), 1E-12);
-assertEqualsDelta(1.1276259652063, Math.cosh(-0.5), 1E-12);
-assertEqualsDelta(74.209948524787, Math.cosh(-5), 1E-12);
+assertEqualsDelta(1.1276259652063807, Math.cosh(0.5), 1E-12);
+assertEqualsDelta(74.20994852478785, Math.cosh(5), 1E-12);
+assertEqualsDelta(1.1276259652063807, Math.cosh(-0.5), 1E-12);
+assertEqualsDelta(74.20994852478785, Math.cosh(-5), 1E-12);
assertEqualsDelta(0.4621171572600, Math.tanh(0.5), 1E-12);
assertEqualsDelta(0.9999092042625, Math.tanh(5), 1E-12);
@@ -134,3 +137,52 @@ assertEqualsDelta(-0.1003353477311, Math.atanh(-0.1), 1E-12);
[1-(1E-16), 0, 1E-10, 1E-50].forEach(function(x) {
assertEqualsDelta(Math.atanh(x), -Math.atanh(-x), 1E-12);
});
+
+
+// Implementation-specific tests for sinh.
+// Case |x| < 2^-28
+assertEquals(Math.pow(2, -29), Math.sinh(Math.pow(2, -29)));
+assertEquals(-Math.pow(2, -29), Math.sinh(-Math.pow(2, -29)));
+// Case |x| < 1
+assertEquals(0.5210953054937474, Math.sinh(0.5));
+assertEquals(-0.5210953054937474, Math.sinh(-0.5));
+// sinh(10*log(2)) = 1048575/2048, case |x| < 22
+assertEquals(1048575/2048, Math.sinh(10*Math.LN2));
+assertEquals(-1048575/2048, Math.sinh(-10*Math.LN2));
+// Case |x| < 22
+assertEquals(11013.232874703393, Math.sinh(10));
+assertEquals(-11013.232874703393, Math.sinh(-10));
+// Case |x| in [22, log(maxdouble)]
+assertEquals(2.1474836479999983e9, Math.sinh(32*Math.LN2));
+assertEquals(-2.1474836479999983e9, Math.sinh(-32*Math.LN2));
+// Case |x| in [22, log(maxdouble)]
+assertEquals(1.3440585709080678e43, Math.sinh(100));
+assertEquals(-1.3440585709080678e43, Math.sinh(-100));
+// No overflow, case |x| in [log(maxdouble), threshold]
+assertEquals(1.7976931348621744e308, Math.sinh(710.4758600739439));
+assertEquals(-1.7976931348621744e308, Math.sinh(-710.4758600739439));
+// Overflow, case |x| > threshold
+assertEquals(Infinity, Math.sinh(710.475860073944));
+assertEquals(-Infinity, Math.sinh(-710.475860073944));
+assertEquals(Infinity, Math.sinh(1000));
+assertEquals(-Infinity, Math.sinh(-1000));
+
+// Implementation-specific tests for cosh.
+// Case |x| < 2^-55
+assertEquals(1, Math.cosh(Math.pow(2, -56)));
+assertEquals(1, Math.cosh(-Math.pow(2, -56)));
+// Case |x| < 1/2*log(2). cosh(Math.LN2/4) = (sqrt(2)+1)/2^(5/4)
+assertEquals(1.0150517651282178, Math.cosh(Math.LN2/4));
+assertEquals(1.0150517651282178, Math.cosh(-Math.LN2/4));
+// Case 1/2*log(2) < |x| < 22. cosh(10*Math.LN2) = 1048577/2048
+assertEquals(512.00048828125, Math.cosh(10*Math.LN2));
+assertEquals(512.00048828125, Math.cosh(-10*Math.LN2));
+// Case 22 <= |x| < log(maxdouble)
+assertEquals(2.1474836479999983e9, Math.cosh(32*Math.LN2));
+assertEquals(2.1474836479999983e9, Math.cosh(-32*Math.LN2));
+// Case log(maxdouble) <= |x| <= overflowthreshold
+assertEquals(1.7976931348621744e308, Math.cosh(710.4758600739439));
+assertEquals(1.7976931348621744e308, Math.cosh(-710.4758600739439));
+// Overflow.
+assertEquals(Infinity, Math.cosh(710.475860073944));
+assertEquals(Infinity, Math.cosh(-710.475860073944));
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2681.js b/deps/v8/test/mjsunit/es6/regress/regress-2681.js
index 9841d84843..8d8e4adb38 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2681.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2681.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-gc --noincremental-marking --harmony-generators
+// Flags: --expose-gc --noincremental-marking
// Check that we are not flushing code for generators.
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2691.js b/deps/v8/test/mjsunit/es6/regress/regress-2691.js
index e17be10814..d7d0c4f175 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2691.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2691.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-generators
-
// Check that yield* on non-objects raises a TypeError.
assertThrows('(function*() { yield* 10 })().next()', TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3280.js b/deps/v8/test/mjsunit/es6/regress/regress-3280.js
index 2fc72cc867..2dadd02840 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-3280.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3280.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-generators --expose-debug-as debug
+// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index 0b07002700..60737af4b9 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -112,7 +112,8 @@ TestValueOf()
function TestToString() {
for (var i in symbols) {
- assertThrows(function() { String(symbols[i]) }, TypeError)
+ assertThrows(function() { new String(symbols[i]) }, TypeError)
+ assertEquals(symbols[i].toString(), String(symbols[i]))
assertThrows(function() { symbols[i] + "" }, TypeError)
assertThrows(function() { String(Object(symbols[i])) }, TypeError)
assertTrue(isValidSymbolString(symbols[i].toString()))
diff --git a/deps/v8/test/mjsunit/es6/unscopables.js b/deps/v8/test/mjsunit/es6/unscopables.js
index 678536dba4..36365d2d82 100644
--- a/deps/v8/test/mjsunit/es6/unscopables.js
+++ b/deps/v8/test/mjsunit/es6/unscopables.js
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unscopables
-// Flags: --harmony-collections
-
var global = this;
var globalProto = Object.getPrototypeOf(global);
diff --git a/deps/v8/test/mjsunit/harmony/array-of.js b/deps/v8/test/mjsunit/harmony/array-of.js
new file mode 100644
index 0000000000..c0a8ed183e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-of.js
@@ -0,0 +1,164 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Based on Mozilla Array.of() tests at http://dxr.mozilla.org/mozilla-central/source/js/src/jit-test/tests/collections
+
+// Flags: --harmony-arrays
+
+
+
+// Array.of makes real arrays.
+
+function check(a) {
+ assertEquals(Object.getPrototypeOf(a), Array.prototype);
+ assertEquals(Array.isArray(a), true);
+ a[9] = 9;
+ assertEquals(a.length, 10);
+}
+
+
+check(Array.of());
+check(Array.of(0));
+check(Array.of(0, 1, 2));
+var f = Array.of;
+check(f());
+
+
+// Array.of basics
+
+var a = Array.of();
+
+assertEquals(a.length, 0);
+a = Array.of(undefined, null, 3.14, []);
+assertEquals(a, [undefined, null, 3.14, []]);
+a = [];
+for (var i = 0; i < 1000; i++)
+ a[i] = i;
+assertEquals(Array.of.apply(null, a), a);
+
+
+// Array.of does not leave holes
+
+assertEquals(Array.of(undefined), [undefined]);
+assertEquals(Array.of(undefined, undefined), [undefined, undefined]);
+assertEquals(Array.of.apply(null, [,,undefined]), [undefined, undefined, undefined]);
+assertEquals(Array.of.apply(null, Array(4)), [undefined, undefined, undefined, undefined]);
+
+
+// Array.of can be transplanted to other classes.
+
+var hits = 0;
+function Bag() {
+ hits++;
+}
+Bag.of = Array.of;
+
+hits = 0;
+var actual = Bag.of("zero", "one");
+assertEquals(hits, 1);
+
+hits = 0;
+var expected = new Bag;
+expected[0] = "zero";
+expected[1] = "one";
+expected.length = 2;
+assertEquals(areSame(actual, expected), true);
+
+hits = 0;
+actual = Array.of.call(Bag, "zero", "one");
+assertEquals(hits, 1);
+assertEquals(areSame(actual, expected), true);
+
+function areSame(object, array) {
+ var result = object.length == array.length;
+ for (var i = 0; i < object.length; i++) {
+ result = result && object[i] == array[i];
+ }
+ return result;
+}
+
+
+// Array.of does not trigger prototype setters.
+// (It defines elements rather than assigning to them.)
+
+var status = "pass";
+Object.defineProperty(Array.prototype, "0", {set: function(v) {status = "FAIL 1"}});
+assertEquals(Array.of(1)[0], 1);
+assertEquals(status, "pass");
+
+Object.defineProperty(Bag.prototype, "0", {set: function(v) {status = "FAIL 2"}});
+assertEquals(Bag.of(1)[0], 1);
+assertEquals(status, "pass");
+
+
+// Array.of passes the number of arguments to the constructor it calls.
+
+var hits = 0;
+
+function Herd(n) {
+ assertEquals(arguments.length, 1);
+ assertEquals(n, 5);
+ hits++;
+}
+
+Herd.of = Array.of;
+Herd.of("sheep", "cattle", "elephants", "whales", "seals");
+assertEquals(hits, 1);
+
+
+// Array.of calls a "length" setter if one is present.
+
+var hits = 0;
+var lastObj = null, lastVal = undefined;
+function setter(v) {
+ hits++;
+ lastObj = this;
+ lastVal = v;
+}
+
+// when the setter is on the new object
+function Pack() {
+ Object.defineProperty(this, "length", {set: setter});
+}
+Pack.of = Array.of;
+var pack = Pack.of("wolves", "cards", "cigarettes", "lies");
+assertEquals(lastObj, pack);
+assertEquals(lastVal, 4);
+
+// when the setter is on the new object's prototype
+function Bevy() {}
+Object.defineProperty(Bevy.prototype, "length", {set: setter});
+Bevy.of = Array.of;
+var bevy = Bevy.of("quail");
+assertEquals(lastObj, bevy);
+assertEquals(lastVal, 1);
+
+
+// Array.of does a strict assignment to the new object's .length.
+// The assignment is strict even if the code we're calling from is not strict.
+
+function Empty() {}
+Empty.of = Array.of;
+Object.defineProperty(Empty.prototype, "length", {get: function() { return 0; }});
+
+var nothing = new Empty;
+nothing.length = 2; // no exception; this is not a strict mode assignment
+
+assertThrows(function() { Empty.of(); }, TypeError);
+
+
+// Check superficial features of Array.of.
+
+var desc = Object.getOwnPropertyDescriptor(Array, "of");
+
+assertEquals(desc.configurable, true);
+assertEquals(desc.enumerable, false);
+assertEquals(desc.writable, true);
+assertEquals(Array.of.length, 0);
+assertThrows(function() { new Array.of() }, TypeError); // not a constructor
+
+// When the this-value passed in is not a constructor, the result is an array.
+[undefined, null, false, "cow"].forEach(function(val) {
+ assertEquals(Array.isArray(Array.of(val)), true);
+});
diff --git a/deps/v8/test/mjsunit/harmony/arrow-functions.js b/deps/v8/test/mjsunit/harmony/arrow-functions.js
index 22b1c94f7f..0ffa936991 100644
--- a/deps/v8/test/mjsunit/harmony/arrow-functions.js
+++ b/deps/v8/test/mjsunit/harmony/arrow-functions.js
@@ -8,7 +8,8 @@
// "new" operator on them.
assertEquals("function", typeof (() => {}));
assertEquals(Function.prototype, Object.getPrototypeOf(() => {}));
-assertThrows("new (() => {})", TypeError);
+assertThrows(function() { new (() => {}); }, TypeError);
+assertFalse("prototype" in (() => {}));
// Check the different syntax variations
assertEquals(1, (() => 1)());
diff --git a/deps/v8/test/mjsunit/harmony/object-literals-method.js b/deps/v8/test/mjsunit/harmony/object-literals-method.js
new file mode 100644
index 0000000000..71f44d10bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/object-literals-method.js
@@ -0,0 +1,248 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-literals --allow-natives-syntax
+
+
+(function TestBasics() {
+ var object = {
+ method() {
+ return 42;
+ }
+ };
+ assertEquals(42, object.method());
+})();
+
+
+(function TestThis() {
+ var object = {
+ method() {
+ assertEquals(object, this);
+ }
+ };
+ object.method();
+})();
+
+
+(function TestDescriptor() {
+ var object = {
+ method() {
+ return 42;
+ }
+ };
+
+ var desc = Object.getOwnPropertyDescriptor(object, 'method');
+ assertTrue(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertTrue(desc.writable);
+ assertEquals('function', typeof desc.value);
+
+ assertEquals(42, desc.value());
+})();
+
+
+(function TestProto() {
+ var object = {
+ method() {}
+ };
+
+ assertEquals(Function.prototype, Object.getPrototypeOf(object.method));
+})();
+
+
+(function TestNotConstructable() {
+ var object = {
+ method() {}
+ };
+
+ assertThrows(function() {
+ new object.method;
+ });
+})();
+
+
+(function TestFunctionName() {
+ var object = {
+ method() {},
+ 1() {},
+ 2.0() {}
+ };
+ var f = object.method;
+ assertEquals('method', f.name);
+ var g = object[1];
+ assertEquals('1', g.name);
+ var h = object[2];
+ assertEquals('2', h.name);
+})();
+
+
+(function TestNoBinding() {
+ var method = 'local';
+ var calls = 0;
+ var object = {
+ method() {
+ calls++;
+ assertEquals('local', method);
+ }
+ };
+ object.method();
+ assertEquals(1, calls);
+})();
+
+
+(function TestNoPrototype() {
+ var object = {
+ method() {}
+ };
+ var f = object.method;
+ assertFalse(f.hasOwnProperty('prototype'));
+ assertEquals(undefined, f.prototype);
+
+ f.prototype = 42;
+ assertEquals(42, f.prototype);
+})();
+
+
+(function TestToString() {
+ var object = {
+ method() { 42; }
+ };
+ assertEquals('method() { 42; }', object.method.toString());
+})();
+
+
+(function TestOptimized() {
+ var object = {
+ method() { return 42; }
+ };
+ assertEquals(42, object.method());
+ assertEquals(42, object.method());
+ %OptimizeFunctionOnNextCall(object.method);
+ assertEquals(42, object.method());
+ assertFalse(object.method.hasOwnProperty('prototype'));
+})();
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+var GeneratorFunction = function*() {}.__proto__.constructor;
+
+
+function assertIteratorResult(value, done, result) {
+ assertEquals({value: value, done: done}, result);
+}
+
+
+(function TestGeneratorBasics() {
+ var object = {
+ *method() {
+ yield 1;
+ }
+ };
+ var g = object.method();
+ assertIteratorResult(1, false, g.next());
+ assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorThis() {
+ var object = {
+ *method() {
+ yield this;
+ }
+ };
+ var g = object.method();
+ assertIteratorResult(object, false, g.next());
+ assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorSymbolIterator() {
+ var object = {
+ *method() {}
+ };
+ var g = object.method();
+ assertEquals(g, g[Symbol.iterator]());
+})();
+
+
+(function TestGeneratorDescriptor() {
+ var object = {
+ *method() {
+ yield 1;
+ }
+ };
+
+ var desc = Object.getOwnPropertyDescriptor(object, 'method');
+ assertTrue(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertTrue(desc.writable);
+ assertEquals('function', typeof desc.value);
+
+ var g = desc.value();
+ assertIteratorResult(1, false, g.next());
+ assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorProto() {
+ var object = {
+ *method() {}
+ };
+
+ assertEquals(GeneratorFunction.prototype,
+ Object.getPrototypeOf(object.method));
+})();
+
+
+(function TestGeneratorConstructable() {
+ var object = {
+ *method() {
+ yield 1;
+ }
+ };
+
+ var g = new object.method();
+ assertIteratorResult(1, false, g.next());
+ assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorName() {
+ var object = {
+ *method() {},
+ *1() {},
+ *2.0() {}
+ };
+ var f = object.method;
+ assertEquals('method', f.name);
+ var g = object[1];
+ assertEquals('1', g.name);
+ var h = object[2];
+ assertEquals('2', h.name);
+})();
+
+
+(function TestGeneratorNoBinding() {
+ var method = 'local';
+ var calls = 0;
+ var object = {
+ *method() {
+ calls++;
+ assertEquals('local', method);
+ }
+ };
+ var g = object.method();
+ assertIteratorResult(undefined, true, g.next());
+ assertEquals(1, calls);
+})();
+
+
+(function TestGeneratorToString() {
+ var object = {
+ *method() { yield 1; }
+ };
+ assertEquals('*method() { yield 1; }', object.method.toString());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/private.js b/deps/v8/test/mjsunit/harmony/private.js
index 4b29fd863e..218094c3d5 100644
--- a/deps/v8/test/mjsunit/harmony/private.js
+++ b/deps/v8/test/mjsunit/harmony/private.js
@@ -83,7 +83,8 @@ TestConstructor()
function TestToString() {
for (var i in symbols) {
- assertThrows(function() { String(symbols[i]) }, TypeError)
+ assertThrows(function() {new String(symbols[i]) }, TypeError)
+ assertEquals(symbols[i].toString(), String(symbols[i]))
assertThrows(function() { symbols[i] + "" }, TypeError)
assertTrue(isValidSymbolString(symbols[i].toString()))
assertTrue(isValidSymbolString(Object(symbols[i]).toString()))
diff --git a/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js b/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
index b982480feb..191bad301e 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unscopables
// Flags: --harmony-proxies
diff --git a/deps/v8/test/mjsunit/harmony/regexp-sticky.js b/deps/v8/test/mjsunit/harmony/regexp-sticky.js
new file mode 100644
index 0000000000..bd7f646d00
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-sticky.js
@@ -0,0 +1,132 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-regexps
+
+var re = /foo.bar/;
+
+assertTrue(!!"foo*bar".match(re));
+assertTrue(!!"..foo*bar".match(re));
+
+var plain = /foobar/;
+
+assertTrue(!!"foobar".match(plain));
+assertTrue(!!"..foobar".match(plain));
+
+var sticky = /foo.bar/y;
+
+assertTrue(!!"foo*bar".match(sticky));
+assertEquals(0, sticky.lastIndex);
+assertFalse(!!"..foo*bar".match(sticky));
+
+var stickyplain = /foobar/y;
+
+assertTrue(!!"foobar".match(stickyplain));
+assertEquals(0, stickyplain.lastIndex);
+assertFalse(!!"..foobar".match(stickyplain));
+
+var global = /foo.bar/g;
+
+assertTrue(global.test("foo*bar"));
+assertFalse(global.test("..foo*bar"));
+global.lastIndex = 0;
+assertTrue(global.test("..foo*bar"));
+
+var plainglobal = /foobar/g;
+
+assertTrue(plainglobal.test("foobar"));
+assertFalse(plainglobal.test("foobar"));
+plainglobal.lastIndex = 0;
+assertTrue(plainglobal.test("foobar"));
+
+var stickyglobal = /foo.bar/gy;
+
+assertTrue(stickyglobal.test("foo*bar"));
+assertEquals(7, stickyglobal.lastIndex);
+assertFalse(stickyglobal.test("..foo*bar"));
+stickyglobal.lastIndex = 0;
+assertFalse(stickyglobal.test("..foo*bar"));
+stickyglobal.lastIndex = 2;
+assertTrue(stickyglobal.test("..foo*bar"));
+assertEquals(9, stickyglobal.lastIndex);
+
+var stickyplainglobal = /foobar/yg;
+assertTrue(stickyplainglobal.sticky);
+stickyplainglobal.sticky = false;
+
+assertTrue(stickyplainglobal.test("foobar"));
+assertEquals(6, stickyplainglobal.lastIndex);
+assertFalse(stickyplainglobal.test("..foobar"));
+stickyplainglobal.lastIndex = 0;
+assertFalse(stickyplainglobal.test("..foobar"));
+stickyplainglobal.lastIndex = 2;
+assertTrue(stickyplainglobal.test("..foobar"));
+assertEquals(8, stickyplainglobal.lastIndex);
+
+assertEquals("/foo.bar/gy", "" + stickyglobal);
+assertEquals("/foo.bar/g", "" + global);
+
+assertTrue(stickyglobal.sticky);
+stickyglobal.sticky = false;
+assertTrue(stickyglobal.sticky);
+
+var stickyglobal2 = new RegExp("foo.bar", "gy");
+assertTrue(stickyglobal2.test("foo*bar"));
+assertEquals(7, stickyglobal2.lastIndex);
+assertFalse(stickyglobal2.test("..foo*bar"));
+stickyglobal2.lastIndex = 0;
+assertFalse(stickyglobal2.test("..foo*bar"));
+stickyglobal2.lastIndex = 2;
+assertTrue(stickyglobal2.test("..foo*bar"));
+assertEquals(9, stickyglobal2.lastIndex);
+
+assertEquals("/foo.bar/gy", "" + stickyglobal2);
+
+assertTrue(stickyglobal2.sticky);
+stickyglobal2.sticky = false;
+assertTrue(stickyglobal2.sticky);
+
+sticky.lastIndex = -1; // Causes sticky regexp to fail fast
+assertFalse(sticky.test("..foo.bar"));
+assertEquals(0, sticky.lastIndex);
+
+sticky.lastIndex = -1; // Causes sticky regexp to fail fast
+assertFalse(!!sticky.exec("..foo.bar"));
+assertEquals(0, sticky.lastIndex);
+
+// ES6 draft says: Even when the y flag is used with a pattern, ^ always
+// matches only at the beginning of Input, or (if Multiline is true) at the
+// beginning of a line.
+var hat = /^foo/y;
+hat.lastIndex = 2;
+assertFalse(hat.test("..foo"));
+
+var mhat = /^foo/my;
+mhat.lastIndex = 2;
+assertFalse(mhat.test("..foo"));
+mhat.lastIndex = 2;
+assertTrue(mhat.test(".\nfoo"));
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-405844.js b/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
new file mode 100644
index 0000000000..fbe7310d79
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-proxies
+
+var proxy = Proxy.create({ fix: function() { return {}; } });
+Object.preventExtensions(proxy);
+Object.observe(proxy, function(){});
+
+var functionProxy = Proxy.createFunction({ fix: function() { return {}; } }, function(){});
+Object.preventExtensions(functionProxy);
+Object.observe(functionProxy, function(){});
diff --git a/deps/v8/test/mjsunit/harmony/super.js b/deps/v8/test/mjsunit/harmony/super.js
new file mode 100644
index 0000000000..809ba1071d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/super.js
@@ -0,0 +1,234 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+
+
+(function TestSuperNamedLoads() {
+ function Base() { }
+ function Derived() {
+ this.derivedDataProperty = "xxx";
+ }
+ Derived.prototype = Object.create(Base.prototype);
+
+ function fBase() { return "Base " + this.toString(); }
+
+ Base.prototype.f = fBase.toMethod(Base.prototype);
+
+ function fDerived() {
+ assertEquals("Base this is Derived", super.f());
+ var a = super.x;
+ assertEquals(15, a);
+ assertEquals(15, super.x);
+ assertEquals(27, this.x);
+
+ return "Derived"
+ }
+
+ Base.prototype.x = 15;
+ Base.prototype.toString = function() { return "this is Base"; };
+ Derived.prototype.toString = function() { return "this is Derived"; };
+ Derived.prototype.x = 27;
+ Derived.prototype.f = fDerived.toMethod(Derived.prototype);
+
+ assertEquals("Base this is Base", new Base().f());
+ assertEquals("Derived", new Derived().f());
+}());
+
+
+(function TestSuperKeywordNonMethod() {
+ function f() {
+ super.unknown();
+ }
+
+ assertThrows(f, ReferenceError);
+}());
+
+
+(function TestGetter() {
+ function Base() {}
+ var derived;
+ Base.prototype = {
+ constructor: Base,
+ get x() {
+ assertSame(this, derived);
+ return this._x;
+ },
+ _x: 'base'
+ };
+
+ function Derived() {}
+ Derived.__proto__ = Base;
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ constructor: Derived,
+ _x: 'derived'
+ };
+ Derived.prototype.testGetter = function() {
+ return super.x;
+ }.toMethod(Derived.prototype);
+ Derived.prototype.testGetterStrict = function() {
+ 'use strict';
+ return super.x;
+ }.toMethod(Derived.prototype);
+ derived = new Derived();
+ assertEquals('derived', derived.testGetter());
+ derived = new Derived();
+ assertEquals('derived', derived.testGetterStrict());
+}());
+
+
+(function TestSetter() {
+ function Base() {}
+ Base.prototype = {
+ constructor: Base,
+ get x() {
+ return this._x;
+ },
+ set x(v) {
+ this._x = v;
+ },
+ _x: 'base'
+ };
+
+ function Derived() {}
+ Derived.__proto__ = Base;
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ constructor: Derived,
+ _x: 'derived'
+ };
+ Derived.prototype.testSetter = function() {
+ assertEquals('foobar', super.x = 'foobar');
+ assertEquals('foobarabc', super.x += 'abc');
+ }.toMethod(Derived.prototype);
+ var d = new Derived();
+ d.testSetter();
+ assertEquals('base', Base.prototype._x);
+ assertEquals('foobarabc', d._x);
+ d._x = '';
+ Derived.prototype.testSetterStrict = function() {
+ 'use strict';
+ assertEquals('foobar', super.x = 'foobar');
+ assertEquals('foobarabc', super.x += 'abc');
+ }.toMethod(Derived.prototype);
+ d.testSetterStrict();
+ assertEquals('base', Base.prototype._x);
+ assertEquals('foobarabc', d._x);
+}());
+
+
+(function TestAccessorsOnPrimitives() {
+ var getCalled = false;
+ var setCalled = false;
+ function Base() {}
+ Base.prototype = {
+ constructor: Base,
+ get x() {
+ getCalled = true;
+ return 1;
+ },
+ set x(v) {
+ setCalled = true;
+ return v;
+ },
+ };
+
+ function Derived() {}
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ constructor: Derived,
+ };
+ Derived.prototype.testSetter = function() {
+ assertTrue(42 == this);
+ getCalled = false;
+ setCalled = false;
+ assertEquals(1, super.x);
+ assertTrue(getCalled);
+ assertFalse(setCalled);
+
+ setCalled = false;
+ getCalled = false;
+ assertEquals(5, super.x = 5);
+ assertFalse(getCalled);
+ assertTrue(setCalled);
+
+ getCalled = false;
+ setCalled = false;
+ assertEquals(6, super.x += 5);
+ assertTrue(getCalled);
+ assertTrue(setCalled);
+ }.toMethod(Derived.prototype);
+
+ Derived.prototype.testSetterStrict = function() {
+ 'use strict';
+ assertTrue(42 == this);
+ getCalled = false;
+ setCalled = false;
+ assertEquals(1, super.x);
+ assertTrue(getCalled);
+ assertFalse(setCalled);
+
+ setCalled = false;
+ getCalled = false;
+ assertEquals(5, super.x = 5);
+ assertFalse(getCalled);
+ assertTrue(setCalled);
+
+ getCalled = false;
+ setCalled = false;
+ assertEquals(6, super.x += 5);
+ assertTrue(getCalled);
+ assertTrue(setCalled);
+ }.toMethod(Derived.prototype);
+
+ Derived.prototype.testSetter.call(42);
+ Derived.prototype.testSetterStrict.call(42);
+
+ function DerivedFromString() {}
+ DerivedFromString.prototype = Object.create(String.prototype);
+
+ function f() {
+ 'use strict';
+ assertTrue(42 == this);
+ assertEquals(String.prototype.toString, super.toString);
+ var except = false;
+ try {
+ super.toString();
+ } catch(e) { except = true; }
+ assertTrue(except);
+ }
+ f.toMethod(DerivedFromString.prototype).call(42);
+}());
+
+
+(function TestSetterFailures() {
+ function Base() {}
+ function Derived() {}
+ Derived.prototype = { __proto__ : Base.prototype };
+ Derived.prototype.mSloppy = function () {
+ super.x = 10;
+ assertEquals(undefined, super.x);
+ }.toMethod(Derived.prototype);
+
+ Derived.prototype.mStrict = function () {
+ "use strict";
+ super.x = 10;
+ }.toMethod(Derived.prototype);
+ var d = new Derived();
+ d.mSloppy();
+ assertEquals(undefined, d.x);
+ var d1 = new Derived();
+ assertThrows(function() { d.mStrict(); }, ReferenceError);
+ assertEquals(undefined, d.x);
+}());
+
+
+(function TestUnsupportedCases() {
+ function f1(x) { return super[x]; }
+ var o = {}
+ assertThrows(function(){f1.toMethod(o)(x);}, ReferenceError);
+ function f2() { super.x++; }
+ assertThrows(function(){f2.toMethod(o)();}, ReferenceError);
+}());
diff --git a/deps/v8/test/mjsunit/harmony/toMethod.js b/deps/v8/test/mjsunit/harmony/toMethod.js
new file mode 100644
index 0000000000..ad51b2ff38
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/toMethod.js
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes --allow-natives-syntax
+
+
+(function TestSingleClass() {
+ function f(x) {
+ var a = [0, 1, 2]
+ return a[x];
+ }
+
+ function ClassD() { }
+
+ assertEquals(1, f(1));
+ var g = f.toMethod(ClassD.prototype);
+ assertEquals(1, g(1));
+ assertEquals(undefined, f[%HomeObjectSymbol()]);
+ assertEquals(ClassD.prototype, g[%HomeObjectSymbol()]);
+}());
+
+
+(function TestClassHierarchy() {
+ function f(x) {
+ return function g(y) { x++; return x + y; };
+ }
+
+ function Base() {}
+ function Derived() { }
+ Derived.prototype = Object.create(Base.prototype);
+
+ var q = f(0);
+ assertEquals(2, q(1));
+ assertEquals(3, q(1));
+ var g = q.toMethod(Derived.prototype);
+ assertFalse(g === q);
+ assertEquals(4, g(1));
+ assertEquals(5, q(1));
+}());
+
+
+(function TestErrorCases() {
+ var sFun = Function.prototype.toMethod;
+ assertThrows(function() { sFun.call({}); }, TypeError);
+ assertThrows(function() { sFun.call({}, {}); }, TypeError);
+ function f(){};
+ assertThrows(function() { f.toMethod(1); }, TypeError);
+}());
+
+
+(function TestPrototypeChain() {
+ var o = {};
+ var o1 = {};
+ function f() { }
+
+ function g() { }
+
+ var fMeth = f.toMethod(o);
+ assertEquals(o, fMeth[%HomeObjectSymbol()]);
+ g.__proto__ = fMeth;
+ assertEquals(undefined, g[%HomeObjectSymbol()]);
+ var gMeth = g.toMethod(o1);
+ assertEquals(fMeth, gMeth.__proto__);
+ assertEquals(o, fMeth[%HomeObjectSymbol()]);
+ assertEquals(o1, gMeth[%HomeObjectSymbol()]);
+}());
+
+
+(function TestBoundFunction() {
+ var o = {};
+ var p = {};
+
+
+ function f(x, y, z, w) {
+ assertEquals(o, this);
+ assertEquals(1, x);
+ assertEquals(2, y);
+ assertEquals(3, z);
+ assertEquals(4, w);
+ return x+y+z+w;
+ }
+
+ var fBound = f.bind(o, 1, 2, 3);
+ var fMeth = fBound.toMethod(p);
+ assertEquals(10, fMeth(4));
+ assertEquals(10, fMeth.call(p, 4));
+ var fBound1 = fBound.bind(o, 4);
+ assertEquals(10, fBound1());
+ var fMethBound = fMeth.bind(o, 4);
+ assertEquals(10, fMethBound());
+}());
+
+(function TestOptimized() {
+ function f(o) {
+ return o.x;
+ }
+ var o = {x : 15};
+ assertEquals(15, f(o));
+ assertEquals(15, f(o));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(15, f(o));
+ var g = f.toMethod({});
+ var o1 = {y : 1024, x : "abc"};
+ assertEquals("abc", f(o1));
+ assertEquals("abc", g(o1));
+} ());
+
+(function TestExtensibility() {
+ function f() {}
+ Object.preventExtensions(f);
+ assertFalse(Object.isExtensible(f));
+ var m = f.toMethod({});
+ assertTrue(Object.isExtensible(m));
+}());
diff --git a/deps/v8/test/mjsunit/keyed-named-access.js b/deps/v8/test/mjsunit/keyed-named-access.js
new file mode 100644
index 0000000000..11f8fb50d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-named-access.js
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var k = "x";
+var o1 = {x: 10};
+var o2 = {x: 11, y: 20};
+var o3 = {x: 12, y: 20, z: 100};
+
+function f(o) {
+ var result = 0;
+ for (var i = 0; i < 100; i++) {
+ result += o[k];
+ }
+ return result;
+}
+
+f(o1);
+f(o1);
+f(o1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1000, f(o1));
+
+f(o2);
+f(o2);
+f(o2);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1100, f(o2));
+
+f(o3);
+f(o3);
+f(o3);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1200, f(o3));
+
+(function CountOperationDeoptimizationGetter() {
+ var global = {};
+ global.__defineGetter__("A", function () { return "x"; });
+
+ function h() {
+ return "A";
+ }
+
+ function g(a, b, c) {
+ try {
+ return a + b.toString() + c;
+ } catch (e) { }
+ }
+
+ function test(o) {
+ return g(1, o[h()]--, 10);
+ }
+
+ test(global);
+ test(global);
+ %OptimizeFunctionOnNextCall(test);
+ print(test(global));
+})();
+
+
+(function CountOperationDeoptimizationPoint() {
+ function test() {
+ this[0, ""]--;
+ }
+
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
diff --git a/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js b/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
index 3c890a8489..c5bd1450f9 100644
--- a/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
+++ b/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
@@ -29,13 +29,13 @@
function MyStringFromCharCode(code, i) {
var one_byte = %NewString(3, true);
- %_OneByteSeqStringSetChar(one_byte, 0, code);
- %_OneByteSeqStringSetChar(one_byte, 1, code);
- %_OneByteSeqStringSetChar(one_byte, i, code);
+ %_OneByteSeqStringSetChar(0, code, one_byte);
+ %_OneByteSeqStringSetChar(1, code, one_byte);
+ %_OneByteSeqStringSetChar(i, code, one_byte);
var two_byte = %NewString(3, false);
- %_TwoByteSeqStringSetChar(two_byte, 0, code);
- %_TwoByteSeqStringSetChar(two_byte, 1, code);
- %_TwoByteSeqStringSetChar(two_byte, i, code);
+ %_TwoByteSeqStringSetChar(0, code, two_byte);
+ %_TwoByteSeqStringSetChar(1, code, two_byte);
+ %_TwoByteSeqStringSetChar(i, code, two_byte);
return one_byte + two_byte;
}
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 228d243643..04c09777f6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -51,6 +51,10 @@
# Issue 3389: deopt_every_n_garbage_collections is unsafe
'regress/regress-2653': [SKIP],
+ # This test relies on --noopt-safe-uint32-operations, which is broken. See
+ # issue 3487 for details.
+ 'compiler/shift-shr': [SKIP],
+
##############################################################################
# TurboFan compiler failures.
@@ -59,53 +63,38 @@
# from the deoptimizer to do that.
'arguments-indirect': [PASS, NO_VARIANTS],
- # TODO(mstarzinger): Sometimes the try-catch blacklist fails.
- 'debug-references': [PASS, NO_VARIANTS],
- 'regress/regress-263': [PASS, NO_VARIANTS],
+ # TODO(rossberg): Typer doesn't like contexts very much.
+ 'harmony/block-conflicts': [PASS, NO_VARIANTS],
+ 'harmony/block-for': [PASS, NO_VARIANTS],
+ 'harmony/block-leave': [PASS, NO_VARIANTS],
+ 'harmony/block-let-crankshaft': [PASS, NO_VARIANTS],
+ 'harmony/empty-for': [PASS, NO_VARIANTS],
- # Some tests are over-restrictive about object layout.
+ # TODO(verwaest): Some tests are over-restrictive about object layout.
'array-constructor-feedback': [PASS, NO_VARIANTS],
'array-feedback': [PASS, NO_VARIANTS],
+ 'compare-known-objects-slow': [PASS, NO_VARIANTS],
+ 'elements-kind': [PASS, NO_VARIANTS],
# Some tests are just too slow to run for now.
- 'big-object-literal': [PASS, NO_VARIANTS],
'bit-not': [PASS, NO_VARIANTS],
'json2': [PASS, NO_VARIANTS],
'packed-elements': [PASS, NO_VARIANTS],
'unbox-double-arrays': [PASS, NO_VARIANTS],
'whitespaces': [PASS, NO_VARIANTS],
- 'compiler/optimized-for-in': [PASS, NO_VARIANTS],
'compiler/osr-assert': [PASS, NO_VARIANTS],
- 'compiler/osr-regress-max-locals': [PASS, NO_VARIANTS],
- 'es7/object-observe': [PASS, NO_VARIANTS],
'regress/regress-2185-2': [PASS, NO_VARIANTS],
- 'regress/regress-284': [PASS, NO_VARIANTS],
- 'regress/string-set-char-deopt': [PASS, NO_VARIANTS],
- 'tools/profviz': [PASS, NO_VARIANTS],
-
- # Support for breakpoints requires special relocation info for DebugBreak.
- 'debug-clearbreakpointgroup': [PASS, NO_VARIANTS],
- 'debug-step-2': [PASS, NO_VARIANTS],
- 'regress/regress-debug-deopt-while-recompile': [PASS, NO_VARIANTS],
- 'regress/regress-opt-after-debug-deopt': [PASS, NO_VARIANTS],
# Support for %GetFrameDetails is missing and requires checkpoints.
- 'debug-backtrace-text': [PASS, NO_VARIANTS],
- 'debug-break-inline': [PASS, NO_VARIANTS],
- 'debug-evaluate-arguments': [PASS, NO_VARIANTS],
'debug-evaluate-bool-constructor': [PASS, NO_VARIANTS],
- 'debug-evaluate-closure': [PASS, NO_VARIANTS],
'debug-evaluate-const': [PASS, NO_VARIANTS],
'debug-evaluate-locals-optimized-double': [PASS, NO_VARIANTS],
'debug-evaluate-locals-optimized': [PASS, NO_VARIANTS],
'debug-evaluate-locals': [PASS, NO_VARIANTS],
'debug-evaluate-with-context': [PASS, NO_VARIANTS],
- 'debug-evaluate-with': [PASS, NO_VARIANTS],
'debug-liveedit-double-call': [PASS, NO_VARIANTS],
'debug-liveedit-restart-frame': [PASS, NO_VARIANTS],
- 'debug-receiver': [PASS, NO_VARIANTS],
'debug-return-value': [PASS, NO_VARIANTS],
- 'debug-scopes': [PASS, NO_VARIANTS],
'debug-set-variable-value': [PASS, NO_VARIANTS],
'debug-step-stub-callfunction': [PASS, NO_VARIANTS],
'debug-stepin-accessor': [PASS, NO_VARIANTS],
@@ -127,37 +116,14 @@
'es6/debug-promises/throw-uncaught-all': [PASS, NO_VARIANTS],
'es6/debug-promises/throw-uncaught-uncaught': [PASS, NO_VARIANTS],
'es6/debug-promises/reject-uncaught-late': [PASS, NO_VARIANTS],
+ 'es6/debug-promises/throw-caught-by-default-reject-handler': [PASS, NO_VARIANTS],
+ 'es6/generators-debug-scopes': [PASS, NO_VARIANTS],
'harmony/debug-blockscopes': [PASS, NO_VARIANTS],
- 'harmony/generators-debug-scopes': [PASS, NO_VARIANTS],
'regress/regress-1081309': [PASS, NO_VARIANTS],
- 'regress/regress-1170187': [PASS, NO_VARIANTS],
- 'regress/regress-119609': [PASS, NO_VARIANTS],
- 'regress/regress-131994': [PASS, NO_VARIANTS],
'regress/regress-269': [PASS, NO_VARIANTS],
- 'regress/regress-325676': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-107996': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-171715': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-222893': [PASS, NO_VARIANTS],
'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
'regress/regress-frame-details-null-receiver': [PASS, NO_VARIANTS],
- # Support for ES6 generators is missing.
- 'regress-3225': [PASS, NO_VARIANTS],
- 'harmony/generators-debug-liveedit': [PASS, NO_VARIANTS],
- 'harmony/generators-iteration': [PASS, NO_VARIANTS],
- 'harmony/generators-parsing': [PASS, NO_VARIANTS],
- 'harmony/generators-poisoned-properties': [PASS, NO_VARIANTS],
- 'harmony/generators-relocation': [PASS, NO_VARIANTS],
- 'harmony/regress/regress-2681': [PASS, NO_VARIANTS],
- 'harmony/regress/regress-2691': [PASS, NO_VARIANTS],
- 'harmony/regress/regress-3280': [PASS, NO_VARIANTS],
-
- # Support for ES6 for-of iteration is missing.
- 'es6/array-iterator': [PASS, NO_VARIANTS],
- 'es6/iteration-semantics': [PASS, NO_VARIANTS],
- 'es6/string-iterator': [PASS, NO_VARIANTS],
- 'es6/typed-array-iterator': [PASS, NO_VARIANTS],
-
##############################################################################
# Too slow in debug mode with --stress-opt mode.
'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
@@ -267,36 +233,10 @@
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
+ 'regress/regress-91008': [PASS, NO_VARIANTS],
}], # 'gc_stress == True'
##############################################################################
-['no_i18n', {
- # Don't call runtime functions that don't exist without i18n support.
- 'runtime-gen/availablelocalesof': [SKIP],
- 'runtime-gen/breakiteratoradopttext': [SKIP],
- 'runtime-gen/breakiteratorbreaktype': [SKIP],
- 'runtime-gen/breakiteratorbreaktype': [SKIP],
- 'runtime-gen/breakiteratorcurrent': [SKIP],
- 'runtime-gen/breakiteratorfirst': [SKIP],
- 'runtime-gen/breakiteratornext': [SKIP],
- 'runtime-gen/canonicalizelanguagetag': [SKIP],
- 'runtime-gen/createbreakiterator': [SKIP],
- 'runtime-gen/createcollator': [SKIP],
- 'runtime-gen/getdefaulticulocale': [SKIP],
- 'runtime-gen/getimplfrominitializedintlobject': [SKIP],
- 'runtime-gen/getlanguagetagvariants': [SKIP],
- 'runtime-gen/internalcompare': [SKIP],
- 'runtime-gen/internaldateformat': [SKIP],
- 'runtime-gen/internaldateparse': [SKIP],
- 'runtime-gen/internalnumberformat': [SKIP],
- 'runtime-gen/internalnumberparse': [SKIP],
- 'runtime-gen/isinitializedintlobject': [SKIP],
- 'runtime-gen/isinitializedintlobjectoftype': [SKIP],
- 'runtime-gen/markasinitializedintlobjectoftype': [SKIP],
- 'runtime-gen/stringnormalize': [SKIP],
-}],
-
-##############################################################################
['arch == arm64 or arch == android_arm64', {
# arm64 TF timeout.
@@ -348,7 +288,6 @@
'bit-not': [PASS, SLOW],
'compiler/alloc-number': [PASS, SLOW],
'compiler/osr-assert': [PASS, SLOW],
- 'compiler/osr-warm': [PASS, TIMEOUT, SLOW],
'compiler/osr-with-args': [PASS, SLOW],
'debug-scopes': [PASS, SLOW],
'generated-transition-stub': [PASS, SLOW],
@@ -473,9 +412,6 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
-
- # BUG(v8:3457).
- 'deserialize-reference': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
@@ -527,9 +463,6 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
-
- # BUG(v8:3457).
- 'deserialize-reference': [SKIP],
}], # 'arch == mips64el'
['arch == mips64el and simulator_run == False', {
@@ -538,6 +471,9 @@
}],
##############################################################################
['system == windows', {
+ # TODO(mstarzinger): Too slow with turbo fan.
+ 'big-object-literal': [PASS, ['mode == debug', SKIP]],
+
# BUG(v8:3435)
'debug-script-breakpoints': [PASS, FAIL],
}], # 'system == windows'
@@ -574,6 +510,18 @@
# Skip long running test that times out in debug mode and goes OOM on NaCl.
'regress/regress-crbug-160010': [SKIP],
+ # Skip tests that timout with turbofan.
+ 'regress/regress-1257': [PASS, NO_VARIANTS],
+ 'regress/regress-2618': [PASS, NO_VARIANTS],
+ 'regress/regress-298269': [PASS, NO_VARIANTS],
+ 'regress/regress-634': [PASS, NO_VARIANTS],
+ 'regress/regress-91008': [PASS, NO_VARIANTS],
+ 'compiler/osr-alignment': [PASS, NO_VARIANTS],
+ 'compiler/osr-one': [PASS, NO_VARIANTS],
+ 'compiler/osr-two': [PASS, NO_VARIANTS],
+ 'stack-traces-overflow': [PASS, NO_VARIANTS],
+ 'mirror-object': [PASS, NO_VARIANTS],
+
# Bug(v8:2978).
'lithium/MathExp': [PASS, FAIL],
diff --git a/deps/v8/test/mjsunit/new-string-add.js b/deps/v8/test/mjsunit/new-string-add.js
deleted file mode 100644
index f5b7cbfbf2..0000000000
--- a/deps/v8/test/mjsunit/new-string-add.js
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --new-string-add
-
-assertEquals("ab", "a" + "b", "ll");
-
-assertEquals("12", "1" + "2", "dd");
-assertEquals("123", "1" + "2" + "3", "ddd");
-assertEquals("123", 1 + "2" + "3", "ndd");
-assertEquals("123", "1" + 2 + "3", "dnd");
-assertEquals("123", "1" + "2" + 3, "ddn");
-
-assertEquals("123", "1" + 2 + 3, "dnn");
-assertEquals("123", 1 + "2" + 3, "ndn");
-assertEquals("33", 1 + 2 + "3", "nnd");
-
-var x = "1";
-assertEquals("12", x + 2, "vn");
-assertEquals("12", x + "2", "vd");
-assertEquals("21", 2 + x, "nv");
-assertEquals("21", "2" + x, "dv");
-
-var y = "2";
-assertEquals("12", x + y, "vdvd");
-
-x = 1;
-assertEquals("12", x + y, "vnvd");
-
-y = 2;
-assertEquals(3, x + y, "vnvn");
-
-x = "1";
-assertEquals("12", x + y, "vdvn");
-
-y = "2";
-assertEquals("12", x + y, "vdvd2");
-
-(function(x, y) {
- var z = "3";
- var w = "4";
-
- assertEquals("11", x + x, "xx");
- assertEquals("12", x + y, "xy");
- assertEquals("13", x + z, "xz");
- assertEquals("14", x + w, "xw");
-
- assertEquals("21", y + x, "yx");
- assertEquals("22", y + y, "yy");
- assertEquals("23", y + z, "yz");
- assertEquals("24", y + w, "yw");
-
- assertEquals("31", z + x, "zx");
- assertEquals("32", z + y, "zy");
- assertEquals("33", z + z, "zz");
- assertEquals("34", z + w, "zw");
-
- assertEquals("41", w + x, "wx");
- assertEquals("42", w + y, "wy");
- assertEquals("43", w + z, "wz");
- assertEquals("44", w + w, "ww");
-
- (function(){x = 1; z = 3;})();
-
- assertEquals(2, x + x, "x'x");
- assertEquals("12", x + y, "x'y");
- assertEquals(4, x + z, "x'z'");
- assertEquals("14", x + w, "x'w");
-
- assertEquals("21", y + x, "yx'");
- assertEquals("22", y + y, "yy");
- assertEquals("23", y + z, "yz'");
- assertEquals("24", y + w, "yw");
-
- assertEquals(4, z + x, "z'x'");
- assertEquals("32", z + y, "z'y");
- assertEquals(6, z + z, "z'z'");
- assertEquals("34", z + w, "z'w");
-
- assertEquals("41", w + x, "wx'");
- assertEquals("42", w + y, "wy");
- assertEquals("43", w + z, "wz'");
- assertEquals("44", w + w, "ww");
-})("1", "2");
-
-assertEquals("142", "1" + new Number(42), "sN");
-assertEquals("421", new Number(42) + "1", "Ns");
-assertEquals(84, new Number(42) + new Number(42), "NN");
-
-assertEquals("142", "1" + new String("42"), "sS");
-assertEquals("421", new String("42") + "1", "Ss");
-assertEquals("142", "1" + new String("42"), "sS");
-assertEquals("4242", new String("42") + new String("42"), "SS");
-
-assertEquals("1true", "1" + true, "sb");
-assertEquals("true1", true + "1", "bs");
-assertEquals(2, true + true, "bs");
-
-assertEquals("1true", "1" + new Boolean(true), "sB");
-assertEquals("true1", new Boolean(true) + "1", "Bs");
-assertEquals(2, new Boolean(true) + new Boolean(true), "Bs");
-
-assertEquals("1undefined", "1" + void 0, "sv");
-assertEquals("undefined1", (void 0) + "1", "vs");
-assertTrue(isNaN(void 0 + void 0), "vv");
-
-assertEquals("1null", "1" + null, "su");
-assertEquals("null1", null + "1", "us");
-assertEquals(0, null + null, "uu");
-
-(function (i) {
- // Check that incoming frames are merged correctly.
- var x;
- var y;
- var z;
- var w;
- switch (i) {
- case 1: x = 42; y = "stry"; z = "strz"; w = 42; break;
- default: x = "strx", y = 42; z = "strz"; w = 42; break;
- }
- var resxx = x + x;
- var resxy = x + y;
- var resxz = x + z;
- var resxw = x + w;
- var resyx = y + x;
- var resyy = y + y;
- var resyz = y + z;
- var resyw = y + w;
- var reszx = z + x;
- var reszy = z + y;
- var reszz = z + z;
- var reszw = z + w;
- var reswx = w + x;
- var reswy = w + y;
- var reswz = w + z;
- var resww = w + w;
- assertEquals(84, resxx, "swxx");
- assertEquals("42stry", resxy, "swxy");
- assertEquals("42strz", resxz, "swxz");
- assertEquals(84, resxw, "swxw");
- assertEquals("stry42", resyx, "swyx");
- assertEquals("strystry", resyy, "swyy");
- assertEquals("strystrz", resyz, "swyz");
- assertEquals("stry42", resyw, "swyw");
- assertEquals("strz42", reszx, "swzx");
- assertEquals("strzstry", reszy, "swzy");
- assertEquals("strzstrz", reszz, "swzz");
- assertEquals("strz42", reszw, "swzw");
- assertEquals(84, reswx, "swwx");
- assertEquals("42stry", reswy, "swwy");
- assertEquals("42strz", reswz, "swwz");
- assertEquals(84, resww, "swww");
-})(1);
-
-// Generate ascii and non ascii strings from length 0 to 20.
-var ascii = 'aaaaaaaaaaaaaaaaaaaa';
-var non_ascii = '\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234';
-assertEquals(20, ascii.length);
-assertEquals(20, non_ascii.length);
-var a = Array(21);
-var b = Array(21);
-for (var i = 0; i <= 20; i++) {
- a[i] = ascii.substring(0, i);
- b[i] = non_ascii.substring(0, i);
-}
-
-// Add ascii and non-ascii strings generating strings with length from 0 to 20.
-for (var i = 0; i <= 20; i++) {
- for (var j = 0; j < i; j++) {
- assertEquals(a[i], a[j] + a[i - j])
- assertEquals(b[i], b[j] + b[i - j])
- }
-}
diff --git a/deps/v8/test/mjsunit/number-literal.js b/deps/v8/test/mjsunit/number-literal.js
new file mode 100644
index 0000000000..7191a1c76f
--- /dev/null
+++ b/deps/v8/test/mjsunit/number-literal.js
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function test(message, a, b, skipStrictMode) {
+ assertSame(eval(a), eval(b), message);
+ if (!skipStrictMode) {
+ (function() {
+ 'use strict';
+ assertSame(eval(a), eval(b), message);
+ })();
+ }
+}
+
+test('hex-int', '0x20', '32');
+test('oct-int', '040', '32', true); // Octals disallowed in strict mode.
+test('dec-int', '32.00', '32');
+test('dec-underflow-int', '32.00000000000000000000000000000000000000001', '32');
+test('exp-int', '3.2e1', '32');
+test('exp-int', '3200e-2', '32');
+test('overflow-inf', '1e2000', 'Infinity');
+test('overflow-inf-exact', '1.797693134862315808e+308', 'Infinity');
+test('non-overflow-inf-exact', '1.797693134862315807e+308',
+ '1.7976931348623157e+308');
+test('underflow-0', '1e-2000', '0');
+test('underflow-0-exact', '2.4703282292062E-324', '0');
+test('non-underflow-0-exact', '2.4703282292063E-324', '5e-324');
+test('precission-loss-high', '9007199254740992', '9007199254740993');
+test('precission-loss-low', '1.9999999999999998', '1.9999999999999997');
+test('non-canonical-literal-int', '1.0', '1');
+test('non-canonical-literal-frac', '1.50', '1.5');
+test('rounding-down', '1.12512512512512452', '1.1251251251251244');
+test('rounding-up', '1.12512512512512453', '1.1251251251251246');
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index 3d0b33bd99..53188d15b8 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -190,3 +190,73 @@ function testKeywordProperty(keyword) {
for (var i = 0; i < keywords.length; i++) {
testKeywordProperty(keywords[i]);
}
+
+
+(function TestNumericNames() {
+ var o = {
+ 1: 1,
+ 2.: 2,
+ 3.0: 3,
+ 4e0: 4,
+ 5E0: 5,
+ 6e-0: 6,
+ 7E-0: 7,
+ 0x8: 8,
+ 0X9: 9,
+ }
+ assertEquals(['1', '2', '3', '4', '5', '6', '7', '8', '9'], Object.keys(o));
+
+ o = {
+ 1.2: 1.2,
+ 1.30: 1.3
+ };
+ assertEquals(['1.2', '1.3'], Object.keys(o));
+})();
+
+
+function TestNumericNamesGetter(expectedKeys, object) {
+ assertEquals(expectedKeys, Object.keys(object));
+ expectedKeys.forEach(function(key) {
+ var descr = Object.getOwnPropertyDescriptor(object, key);
+ assertEquals(key, descr.get.name);
+ });
+}
+TestNumericNamesGetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
+ get 1() {},
+ get 2.() {},
+ get 3.0() {},
+ get 4e0() {},
+ get 5E0() {},
+ get 6e-0() {},
+ get 7E-0() {},
+ get 0x8() {},
+ get 0X9() {},
+});
+TestNumericNamesGetter(['1.2', '1.3'], {
+ get 1.2() {},
+ get 1.30() {}
+});
+
+
+function TestNumericNamesSetter(expectedKeys, object) {
+ assertEquals(expectedKeys, Object.keys(object));
+ expectedKeys.forEach(function(key) {
+ var descr = Object.getOwnPropertyDescriptor(object, key);
+ assertEquals(key, descr.set.name);
+ });
+}
+TestNumericNamesSetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
+ set 1(_) {},
+ set 2.(_) {},
+ set 3.0(_) {},
+ set 4e0(_) {},
+ set 5E0(_) {},
+ set 6e-0(_) {},
+ set 7E-0(_) {},
+ set 0x8(_) {},
+ set 0X9(_) {},
+});
+TestNumericNamesSetter(['1.2', '1.3'], {
+ set 1.2(_) {; },
+ set 1.30(_) {; }
+});
diff --git a/deps/v8/test/cctest/test-libplatform-task-queue.cc b/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
index 630686b459..4186a63fef 100644
--- a/deps/v8/test/cctest/test-libplatform-task-queue.cc
+++ b/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,72 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+// Test that sticky regexp support is not affecting V8 when the
+// --harmony-regexps flag is not on.
-#include "src/libplatform/task-queue.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-libplatform.h"
+assertThrows(function() { eval("/foo.bar/y"); }, SyntaxError);
+assertThrows(function() { eval("/foobar/y"); }, SyntaxError);
+assertThrows(function() { eval("/foo.bar/gy"); }, SyntaxError);
+assertThrows(function() { eval("/foobar/gy"); }, SyntaxError);
+assertThrows(function() { new RegExp("foo.bar", "y"); }, SyntaxError);
+assertThrows(function() { new RegExp("foobar", "y"); }, SyntaxError);
+assertThrows(function() { new RegExp("foo.bar", "gy"); }, SyntaxError);
+assertThrows(function() { new RegExp("foobar", "gy"); }, SyntaxError);
-using namespace v8::internal;
-using namespace v8::platform;
+var re = /foo.bar/;
+assertEquals("/foo.bar/", "" + re);
+var plain = /foobar/;
+assertEquals("/foobar/", "" + plain);
+re.compile("foo.bar");
+assertEquals(void 0, re.sticky);
-TEST(TaskQueueBasic) {
- TaskCounter task_counter;
+var global = /foo.bar/g;
+assertEquals("/foo.bar/g", "" + global);
+var plainglobal = /foobar/g;
+assertEquals("/foobar/g", "" + plainglobal);
- TaskQueue queue;
+assertEquals(void 0, re.sticky);
+re.sticky = true; // Has no effect on the regexp, just sets a property.
+assertTrue(re.sticky);
- TestTask* task = new TestTask(&task_counter);
- queue.Append(task);
- CHECK_EQ(1, task_counter.GetCount());
- CHECK_EQ(task, queue.GetNext());
- delete task;
- CHECK_EQ(0, task_counter.GetCount());
+assertTrue(re.test("..foo.bar"));
- queue.Terminate();
- CHECK_EQ(NULL, queue.GetNext());
-}
+re.lastIndex = -1; // Ignored for non-global, non-sticky.
+assertTrue(re.test("..foo.bar"));
+assertEquals(-1, re.lastIndex);
-
-class ReadQueueTask : public TestTask {
- public:
- ReadQueueTask(TaskCounter* task_counter, TaskQueue* queue)
- : TestTask(task_counter, true), queue_(queue) {}
- virtual ~ReadQueueTask() {}
-
- virtual void Run() V8_OVERRIDE {
- TestTask::Run();
- CHECK_EQ(NULL, queue_->GetNext());
- }
-
- private:
- TaskQueue* queue_;
-
- DISALLOW_COPY_AND_ASSIGN(ReadQueueTask);
-};
-
-
-TEST(TaskQueueTerminateMultipleReaders) {
- TaskQueue queue;
- TaskCounter task_counter;
- ReadQueueTask* read1 = new ReadQueueTask(&task_counter, &queue);
- ReadQueueTask* read2 = new ReadQueueTask(&task_counter, &queue);
-
- TestWorkerThread thread1(read1);
- TestWorkerThread thread2(read2);
-
- thread1.Start();
- thread2.Start();
-
- CHECK_EQ(2, task_counter.GetCount());
-
- thread1.Signal();
- thread2.Signal();
-
- queue.Terminate();
-
- thread1.Join();
- thread2.Join();
-
- CHECK_EQ(0, task_counter.GetCount());
-}
+re.lastIndex = -1; // Ignored for non-global, non-sticky.
+assertTrue(!!re.exec("..foo.bar"));
+assertEquals(-1, re.lastIndex);
diff --git a/deps/v8/test/mjsunit/regress-3225.js b/deps/v8/test/mjsunit/regress-3225.js
index 357f94b24c..fe44b85110 100644
--- a/deps/v8/test/mjsunit/regress-3225.js
+++ b/deps/v8/test/mjsunit/regress-3225.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-generators
+// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/poly_count_operation.js b/deps/v8/test/mjsunit/regress/poly_count_operation.js
index a8a1ed2ebc..99f041b6f1 100644
--- a/deps/v8/test/mjsunit/regress/poly_count_operation.js
+++ b/deps/v8/test/mjsunit/regress/poly_count_operation.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbo-deoptimization
var o1 = {x:1};
var o2 = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-1170187.js b/deps/v8/test/mjsunit/regress/regress-1170187.js
index 5e82f8a83b..3621bc44a8 100644
--- a/deps/v8/test/mjsunit/regress/regress-1170187.js
+++ b/deps/v8/test/mjsunit/regress/regress-1170187.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
+
// Make sure that the retreival of local variables are performed correctly even
// when an adapter frame is present.
diff --git a/deps/v8/test/mjsunit/regress/regress-119609.js b/deps/v8/test/mjsunit/regress/regress-119609.js
index 99041adaf4..0c85063ac7 100644
--- a/deps/v8/test/mjsunit/regress/regress-119609.js
+++ b/deps/v8/test/mjsunit/regress/regress-119609.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-131994.js b/deps/v8/test/mjsunit/regress/regress-131994.js
index 7f600959da..3de3813eac 100644
--- a/deps/v8/test/mjsunit/regress/regress-131994.js
+++ b/deps/v8/test/mjsunit/regress/regress-131994.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
// Test that a variable in the local scope that shadows a context-allocated
// variable is correctly resolved when being evaluated in the debugger.
diff --git a/deps/v8/test/mjsunit/regress/regress-325676.js b/deps/v8/test/mjsunit/regress/regress-325676.js
index 427bbc38dc..7450a6d12c 100644
--- a/deps/v8/test/mjsunit/regress/regress-325676.js
+++ b/deps/v8/test/mjsunit/regress/regress-325676.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
// If a function parameter is forced to be context allocated,
// debug evaluate need to resolve it to a context slot instead of
diff --git a/deps/v8/test/mjsunit/regress/regress-3564.js b/deps/v8/test/mjsunit/regress/regress-3564.js
new file mode 100644
index 0000000000..a0b9eb2994
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3564.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function MyWrapper(v) {
+ return { valueOf: function() { return v } };
+}
+
+function f() {
+ assertTrue("a" < "x");
+ assertTrue("a" < new String("y"));
+ assertTrue("a" < new MyWrapper("z"));
+
+ assertFalse("a" > "x");
+ assertFalse("a" > new String("y"));
+ assertFalse("a" > new MyWrapper("z"));
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-403292.js b/deps/v8/test/mjsunit/regress/regress-403292.js
new file mode 100644
index 0000000000..4e7ba283f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-403292.js
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-natives-as=builtins --expose-gc
+
+var __v_7 = [];
+var __v_8 = {};
+var __v_10 = {};
+var __v_11 = this;
+var __v_12 = {};
+var __v_13 = {};
+var __v_14 = "";
+var __v_15 = {};
+try {
+__v_1 = {x:0};
+%OptimizeFunctionOnNextCall(__f_1);
+assertEquals("good", __f_1());
+delete __v_1.x;
+assertEquals("good", __f_1());
+} catch(e) { print("Caught: " + e); }
+try {
+__v_3 = new Set();
+__v_5 = new builtins.SetIterator(__v_3, -12);
+__v_4 = new Map();
+__v_6 = new builtins.MapIterator(__v_4, 2);
+__f_3(Array);
+} catch(e) { print("Caught: " + e); }
+function __f_4(__v_8, filter) {
+ function __f_6(v) {
+ for (var __v_4 in v) {
+ for (var __v_4 in v) {}
+ }
+ %OptimizeFunctionOnNextCall(filter);
+ return filter(v);
+ }
+ var __v_7 = eval(__v_8);
+ gc();
+ return __f_6(__v_7);
+}
+function __f_5(__v_6) {
+ var __v_5 = new Array(__v_6);
+ for (var __v_4 = 0; __v_4 < __v_6; __v_4++) __v_5.push('{}');
+ return __v_5;
+}
+try {
+try {
+ __v_8.test("\x80");
+ assertUnreachable();
+} catch (e) {
+}
+gc();
+} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-404981.js b/deps/v8/test/mjsunit/regress/regress-404981.js
new file mode 100644
index 0000000000..5508d6fea7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-404981.js
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var large_object = new Array(5000001);
+large_object.length = 23;
diff --git a/deps/v8/test/mjsunit/regress/regress-408036.js b/deps/v8/test/mjsunit/regress/regress-408036.js
new file mode 100644
index 0000000000..a4dfade25d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-408036.js
@@ -0,0 +1,5 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-natives-as 1
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-387627.js b/deps/v8/test/mjsunit/regress/regress-409533.js
index 5c6389b5f1..e51065e4bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-387627.js
+++ b/deps/v8/test/mjsunit/regress/regress-409533.js
@@ -4,9 +4,9 @@
// Flags: --allow-natives-syntax
-function f() {}
-%FunctionBindArguments(f, {}, undefined, 1);
-
+function f() {
+ %_RegExpConstructResult(0, {}, {});
+}
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-410912.js b/deps/v8/test/mjsunit/regress/regress-410912.js
new file mode 100644
index 0000000000..98367bdf20
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-410912.js
@@ -0,0 +1,206 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var assertDoesNotThrow;
+var assertInstanceof;
+var assertUnreachable;
+var assertOptimized;
+var assertUnoptimized;
+function classOf(object) { var string = Object.prototype.toString.call(object); return string.substring(8, string.length - 1); }
+function PrettyPrint(value) { return ""; }
+function PrettyPrintArrayElement(value, index, array) { return ""; }
+function fail(expectedText, found, name_opt) { }
+function deepObjectEquals(a, b) { var aProps = Object.keys(a); aProps.sort(); var bProps = Object.keys(b); bProps.sort(); if (!deepEquals(aProps, bProps)) { return false; } for (var i = 0; i < aProps.length; i++) { if (!deepEquals(a[aProps[i]], b[aProps[i]])) { return false; } } return true; }
+function deepEquals(a, b) { if (a === b) { if (a === 0) return (1 / a) === (1 / b); return true; } if (typeof a != typeof b) return false; if (typeof a == "number") return isNaN(a) && isNaN(b); if (typeof a !== "object" && typeof a !== "function") return false; var objectClass = classOf(a); if (objectClass !== classOf(b)) return false; if (objectClass === "RegExp") { return (a.toString() === b.toString()); } if (objectClass === "Function") return false; if (objectClass === "Array") { var elementCount = 0; if (a.length != b.length) { return false; } for (var i = 0; i < a.length; i++) { if (!deepEquals(a[i], b[i])) return false; } return true; } if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") { if (a.valueOf() !== b.valueOf()) return false; } return deepObjectEquals(a, b); }
+assertSame = function assertSame(expected, found, name_opt) { if (found === expected) { if (expected !== 0 || (1 / expected) == (1 / found)) return; } else if ((expected !== expected) && (found !== found)) { return; } fail(PrettyPrint(expected), found, name_opt); }; assertEquals = function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(PrettyPrint(expected), found, name_opt); } };
+assertEqualsDelta = function assertEqualsDelta(expected, found, delta, name_opt) { assertTrue(Math.abs(expected - found) <= delta, name_opt); };
+assertArrayEquals = function assertArrayEquals(expected, found, name_opt) { var start = ""; if (name_opt) { start = name_opt + " - "; } assertEquals(expected.length, found.length, start + "array length"); if (expected.length == found.length) { for (var i = 0; i < expected.length; ++i) { assertEquals(expected[i], found[i], start + "array element at index " + i); } } };
+assertPropertiesEqual = function assertPropertiesEqual(expected, found, name_opt) { if (!deepObjectEquals(expected, found)) { fail(expected, found, name_opt); } };
+assertToStringEquals = function assertToStringEquals(expected, found, name_opt) { if (expected != String(found)) { fail(expected, found, name_opt); } };
+assertTrue = function assertTrue(value, name_opt) { assertEquals(true, value, name_opt); };
+assertFalse = function assertFalse(value, name_opt) { assertEquals(false, value, name_opt); };
+assertNull = function assertNull(value, name_opt) { if (value !== null) { fail("null", value, name_opt); } };
+assertNotNull = function assertNotNull(value, name_opt) { if (value === null) { fail("not null", value, name_opt); } };
+var __v_39 = {};
+var __v_40 = {};
+var __v_41 = {};
+var __v_42 = {};
+var __v_43 = {};
+var __v_44 = {};
+try {
+__v_0 = [1.5,,1.7];
+__v_1 = {__v_0:1.8};
+} catch(e) { print("Caught: " + e); }
+function __f_0(__v_1,__v_0,i) {
+ __v_1.a = __v_0[i];
+ gc();
+}
+try {
+__f_0(__v_1,__v_0,0);
+__f_0(__v_1,__v_0,0);
+%OptimizeFunctionOnNextCall(__f_0);
+__f_0(__v_1,__v_0,1);
+assertEquals(undefined, __v_1.a);
+__v_0 = [1,,3];
+__v_1 = {ab:5};
+} catch(e) { print("Caught: " + e); }
+function __f_1(__v_1,__v_0,i) {
+ __v_1.ab = __v_0[i];
+}
+try {
+__f_1(__v_1,__v_0,1);
+} catch(e) { print("Caught: " + e); }
+function __f_5(x) {
+ return ~x;
+}
+try {
+__f_5(42);
+assertEquals(~12, __f_5(12.45));
+assertEquals(~46, __f_5(42.87));
+__v_2 = 1, __v_4 = 2, __v_3 = 4, __v_6 = 8;
+} catch(e) { print("Caught: " + e); }
+function __f_4() {
+ return __v_2 | (__v_4 | (__v_3 | __v_6));
+}
+try {
+__f_4();
+__v_3 = "16";
+assertEquals(17 | -13 | 0 | -5, __f_4());
+} catch(e) { print("Caught: " + e); }
+function __f_6() {
+ return __f_4();
+}
+try {
+assertEquals(1 | 2 | 16 | 8, __f_6());
+__f_4 = function() { return 42; };
+assertEquals(42, __f_6());
+__v_5 = {};
+__v_5.__f_4 = __f_4;
+} catch(e) { print("Caught: " + e); }
+function __f_7(o) {
+ return o.__f_4();
+}
+try {
+for (var __v_7 = 0; __v_7 < 5; __v_7++) __f_7(__v_5);
+%OptimizeFunctionOnNextCall(__f_7);
+__f_7(__v_5);
+assertEquals(42, __f_7(__v_5));
+assertEquals(87, __f_7({__f_4: function() { return 87; }}));
+} catch(e) { print("Caught: " + e); }
+function __f_8(x,y) {
+ x = 42;
+ y = 1;
+ y = y << "0";
+ return x | y;
+}
+try {
+assertEquals(43, __f_8(0,0));
+} catch(e) { print("Caught: " + e); }
+function __f_2(x) {
+ return 'lit[' + (x + ']');
+}
+try {
+assertEquals('lit[-87]', __f_2(-87));
+assertEquals('lit[0]', __f_2(0));
+assertEquals('lit[42]', __f_2(42));
+__v_9 = "abc";
+gc();
+var __v_8;
+} catch(e) { print("Caught: " + e); }
+function __f_9(n) { return __v_9.charAt(n); }
+try {
+for (var __v_7 = 0; __v_7 < 5; __v_7++) {
+ __v_8 = __f_9(0);
+}
+%OptimizeFunctionOnNextCall(__f_9);
+__v_8 = __f_9(0);
+} catch(e) { print("Caught: " + e); }
+function __f_3(__v_2,__v_4,__v_3,__v_6) {
+ return __v_2+__v_4+__v_3+__v_6;
+}
+try {
+assertEquals(0x40000003, __f_3(1,1,2,0x3fffffff));
+} catch(e) { print("Caught: " + e); }
+try {
+__v_19 = {
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ external_int32 : 'external int8 elements',
+ external_uint8 : 'external uint8 elements',
+ external_int16 : 'external int16 elements',
+ external_uint16 : 'external uint16 elements',
+ external_int32 : 'external int32 elements',
+ external_uint32 : 'external uint32 elements',
+ external_float32 : 'external float32 elements',
+ external_float64 : 'external float64 elements',
+ external_uint8_clamped : 'external uint8_clamped elements',
+ fixed_int32 : 'fixed int8 elements',
+ fixed_uint8 : 'fixed uint8 elements',
+ fixed_int16 : 'fixed int16 elements',
+ fixed_uint16 : 'fixed uint16 elements',
+ fixed_int32 : 'fixed int32 elements',
+ fixed_uint32 : 'fixed uint32 elements',
+ fixed_float32 : 'fixed float32 elements',
+ fixed_float64 : 'fixed float64 elements',
+ fixed_uint8_clamped : 'fixed uint8_clamped elements'
+}
+} catch(e) { print("Caught: " + e); }
+function __f_12() {
+}
+__v_10 = {};
+__v_10.dance = 0xD15C0;
+__v_10.drink = 0xC0C0A;
+__f_12(__v_19.fast, __v_10);
+__v_24 = [1,2,3];
+__f_12(__v_19.fast_smi_only, __v_24);
+__v_24.dance = 0xD15C0;
+__v_24.drink = 0xC0C0A;
+__f_12(__v_19.fast_smi_only, __v_24);
+function __f_18() {
+ var __v_27 = new Array();
+ __f_12(__v_19.fast_smi_only, __v_27);
+ for (var __v_18 = 0; __v_18 < 1337; __v_18++) {
+ var __v_16 = __v_18;
+ if (__v_18 == 1336) {
+ __f_12(__v_19.fast_smi_only, __v_27);
+ __v_16 = new Object();
+ }
+ __v_27[__v_18] = __v_16;
+ }
+ __f_12(__v_19.fast, __v_27);
+ var __v_15 = [];
+ __v_15[912570] = 7;
+ __f_12(__v_19.dictionary, __v_15);
+ var __v_26 = new Array(912561);
+ %SetAllocationTimeout(100000000, 10000000);
+ for (var __v_18 = 0; __v_18 < 0x20000; __v_18++) {
+ __v_26[0] = __v_18 / 2;
+ }
+ __f_12(__v_19.fixed_int8, new Int8Array(007));
+ __f_12(__v_19.fixed_uint8, new Uint8Array(007));
+ __f_12(__v_19.fixed_int16, new Int16Array(666));
+ __f_12(__v_19.fixed_uint16, new Uint16Array(42));
+ __f_12(__v_19.fixed_int32, new Int32Array(0xF));
+ __f_12(__v_19.fixed_uint32, new Uint32Array(23));
+ __f_12(__v_19.fixed_float32, new Float32Array(7));
+ __f_12(__v_19.fixed_float64, new Float64Array(0));
+ __f_12(__v_19.fixed_uint8_clamped, new Uint8ClampedArray(512));
+ var __v_13 = new ArrayBuffer(128);
+ __f_12(__v_19.external_int8, new Int8Array(__v_13));
+ __f_12(__v_37.external_uint8, new Uint8Array(__v_13));
+ __f_12(__v_19.external_int16, new Int16Array(__v_13));
+ __f_12(__v_19.external_uint16, new Uint16Array(__v_13));
+ __f_12(__v_19.external_int32, new Int32Array(__v_13));
+ __f_12(__v_19.external_uint32, new Uint32Array(__v_13));
+ __f_12(__v_19.external_float32, new Float32Array(__v_13));
+ __f_12(__v_19.external_float64, new Float64Array(__v_13));
+ __f_12(__v_19.external_uint8_clamped, new Uint8ClampedArray(__v_13));
+}
+try {
+__f_18();
+} catch(e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-411210.js b/deps/v8/test/mjsunit/regress/regress-411210.js
new file mode 100644
index 0000000000..2dbc5ff70c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-411210.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=439 --random-seed=-423594851
+
+var __v_3;
+function __f_2() {
+ var __v_1 = new Array(3);
+ __v_1[0] = 10;
+ __v_1[1] = 15.5;
+ __v_3 = __f_2();
+ __v_1[2] = 20;
+ return __v_1;
+}
+
+try {
+ for (var __v_2 = 0; __v_2 < 3; ++__v_2) {
+ __v_3 = __f_2();
+ }
+}
+catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-411237.js b/deps/v8/test/mjsunit/regress/regress-411237.js
new file mode 100644
index 0000000000..8b75ba3015
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-411237.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony
+
+try {
+ %OptimizeFunctionOnNextCall(print);
+} catch(e) { }
+
+try {
+ function* f() {
+ }
+ %OptimizeFunctionOnNextCall(f);
+} catch(e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-412162.js b/deps/v8/test/mjsunit/regress/regress-412162.js
new file mode 100644
index 0000000000..6a7ad0c57f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-412162.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test() {
+ Math.abs(-NaN).toString();
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-416416.js b/deps/v8/test/mjsunit/regress/regress-416416.js
new file mode 100644
index 0000000000..66e882e0fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-416416.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ try {
+ String.prototype.length.x();
+ } catch (e) {
+ }
+}
+
+foo();
+foo();
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-416730.js b/deps/v8/test/mjsunit/regress/regress-416730.js
new file mode 100644
index 0000000000..8d7f207fd9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-416730.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var d = {x: undefined, y: undefined};
+
+function Crash(left, right) {
+ var c = {
+ x: right.x - left.x,
+ y: right.y - left.y
+ };
+ return c.x * c.y;
+}
+
+var a = {x: 0.5, y: 0};
+var b = {x: 1, y: 0};
+
+for (var i = 0; i < 3; i++) Crash(a, b);
+%OptimizeFunctionOnNextCall(Crash);
+Crash(a, b);
+
+Crash({x: 0, y: 0.5}, b);
diff --git a/deps/v8/test/mjsunit/regress/regress-conditional-position.js b/deps/v8/test/mjsunit/regress/regress-conditional-position.js
index cd8f7bd745..ae5a3acb58 100644
--- a/deps/v8/test/mjsunit/regress/regress-conditional-position.js
+++ b/deps/v8/test/mjsunit/regress/regress-conditional-position.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --always-full-compiler
+// Flags: --nocrankshaft
var functionToCatch;
var lineNumber;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-107996.js b/deps/v8/test/mjsunit/regress/regress-crbug-107996.js
index dfe07e59de..b4907f3bb8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-107996.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-107996.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-171715.js b/deps/v8/test/mjsunit/regress/regress-crbug-171715.js
index 040c381e39..309f50a01b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-171715.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-171715.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-222893.js b/deps/v8/test/mjsunit/regress/regress-crbug-222893.js
index 39363bc912..75e17289fd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-222893.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-222893.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-320922.js b/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
index 4a5b5813e0..9ba759a43e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
@@ -30,7 +30,7 @@
var string = "hello world";
var expected = "Hello " + "world";
function Capitalize() {
- %_OneByteSeqStringSetChar(string, 0, 0x48);
+ %_OneByteSeqStringSetChar(0, 0x48, string);
}
Capitalize();
assertEquals(expected, string);
@@ -40,7 +40,7 @@ assertEquals(expected, string);
var twobyte = "\u20ACello world";
function TwoByteCapitalize() {
- %_TwoByteSeqStringSetChar(twobyte, 0, 0x48);
+ %_TwoByteSeqStringSetChar(0, 0x48, twobyte);
}
TwoByteCapitalize();
assertEquals(expected, twobyte);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js b/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
new file mode 100644
index 0000000000..d896eadcc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+
+var step = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ if (step == 0) {
+ assertEquals("error", exec_state.frame(0).evaluate("e").value());
+ exec_state.frame(0).evaluate("e = 'foo'");
+ exec_state.frame(0).evaluate("x = 'modified'");
+ } else {
+ assertEquals("argument", exec_state.frame(0).evaluate("e").value());
+ exec_state.frame(0).evaluate("e = 'bar'");
+ }
+ step++;
+ } catch (e) {
+ print(e + e.stack);
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+function f(e, x) {
+ try {
+ throw "error";
+ } catch(e) {
+ debugger;
+ assertEquals("foo", e);
+ }
+ debugger;
+ assertEquals("bar", e);
+ assertEquals("modified", x);
+}
+
+f("argument")
+assertNull(exception);
+assertEquals(2, step);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-357052.js b/deps/v8/test/mjsunit/regress/regress-crbug-357052.js
index 9cde1b66c2..7a58396407 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-357052.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-357052.js
@@ -7,5 +7,6 @@ function f() {
for (var i = 0; i < 30; i++) {
str += "abcdefgh12345678" + str;
}
+ return str;
}
assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-403409.js b/deps/v8/test/mjsunit/regress/regress-crbug-403409.js
new file mode 100644
index 0000000000..ffd100b468
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-403409.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Array.prototype[0] = 777;
+var kElements = 10;
+
+var input_array = [];
+for (var i = 1; i < kElements; i++) {
+ input_array[i] = 0.5;
+}
+var output_array = input_array.concat(0.5);
+
+assertEquals(kElements + 1, output_array.length);
+assertEquals(777, output_array[0]);
+for (var j = 1; j < kElements; j++) {
+ assertEquals(0.5, output_array[j]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405491.js b/deps/v8/test/mjsunit/regress/regress-crbug-405491.js
new file mode 100644
index 0000000000..b63378113f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-405491.js
@@ -0,0 +1,5 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as 1
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405517.js b/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
new file mode 100644
index 0000000000..36c3f4f7f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=203
+
+function f() {
+ var e = [0];
+ %PreventExtensions(e);
+ for (var i = 0; i < 4; i++) e.shift();
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405922.js b/deps/v8/test/mjsunit/regress/regress-crbug-405922.js
new file mode 100644
index 0000000000..9f76a862db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-405922.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+ }
+ } catch (e) {
+ }
+}
+
+Debug.setListener(listener);
+
+function f(x) {
+ if (x > 0) %_CallFunction(null, x-1, f);
+}
+
+debugger;
+f(2);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-407946.js b/deps/v8/test/mjsunit/regress/regress-crbug-407946.js
new file mode 100644
index 0000000000..d5687cca34
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-407946.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(n) { return [0].indexOf((n - n) + 0); }
+
+assertEquals(0, f(.1));
+assertEquals(0, f(.1));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0, f(.1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412203.js b/deps/v8/test/mjsunit/regress/regress-crbug-412203.js
new file mode 100644
index 0000000000..f15085954f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412203.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var b = [];
+b[10000] = 1;
+// Required to reproduce the bug.
+assertTrue(%HasDictionaryElements(b));
+
+var a1 = [1.5];
+b.__proto__ = a1;
+assertEquals(1.5, ([].concat(b))[0]);
+
+var a2 = new Int32Array(2);
+a2[0] = 3;
+b.__proto__ = a2
+assertEquals(3, ([].concat(b))[0]);
+
+function foo(x, y) {
+ var a = [];
+ a[10000] = 1;
+ assertTrue(%HasDictionaryElements(a));
+
+ a.__proto__ = arguments;
+ var c = [].concat(a);
+ for (var i = 0; i < arguments.length; i++) {
+ assertEquals(i + 2, c[i]);
+ }
+ assertEquals(undefined, c[arguments.length]);
+ assertEquals(undefined, c[arguments.length + 1]);
+}
+foo(2);
+foo(2, 3);
+foo(2, 3, 4);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412208.js b/deps/v8/test/mjsunit/regress/regress-crbug-412208.js
new file mode 100644
index 0000000000..a194f855b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412208.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var non_const_true = true;
+
+function f() {
+ return non_const_true || (f() = this);
+}
+
+assertTrue(f());
+assertTrue(f());
+%OptimizeFunctionOnNextCall(f);
+assertTrue(f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412210.js b/deps/v8/test/mjsunit/regress/regress-crbug-412210.js
new file mode 100644
index 0000000000..6ec7d62379
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412210.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ return (x ? "" >> 0 : "") + /a/;
+};
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412215.js b/deps/v8/test/mjsunit/regress/regress-crbug-412215.js
new file mode 100644
index 0000000000..ad926fc4a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412215.js
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var dummy = {foo: "true"};
+
+var a = {y:0.5};
+a.y = 357;
+var b = a.y;
+
+var d;
+function f( ) {
+ d = 357;
+ return {foo: b};
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+var x = f();
+
+// With the bug, x is now an invalid object; the code below
+// triggers a crash.
+
+function g(obj) {
+ return obj.foo.length;
+}
+
+g(dummy);
+g(dummy);
+%OptimizeFunctionOnNextCall(g);
+g(x);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412319.js b/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
new file mode 100644
index 0000000000..21386e3bd6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function __f_6() {
+ var __v_7 = [0];
+ %PreventExtensions(__v_7);
+ for (var __v_6 = -2; __v_6 < 19; __v_6++) __v_7.shift();
+ __f_7(__v_7);
+}
+__f_6();
+__f_6();
+%OptimizeFunctionOnNextCall(__f_6);
+__f_6();
+function __f_7(__v_7) {
+ __v_7.push(Infinity);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-416558.js b/deps/v8/test/mjsunit/regress/regress-crbug-416558.js
new file mode 100644
index 0000000000..375ad406ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-416558.js
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = /x/;
+ store(c);
+ function get_hole() {
+ var b = /x/;
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new Date();
+ store(c);
+ function get_hole() {
+ var b = new Date();
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new Number(1);
+ store(c);
+ function get_hole() {
+ var b = new Number(1);
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new Boolean();
+ store(c);
+ function get_hole() {
+ var b = new Boolean();
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new Map();
+ store(c);
+ function get_hole() {
+ var b = new Map();
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new Set();
+ store(c);
+ function get_hole() {
+ var b = new Set();
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new WeakMap();
+ store(c);
+ function get_hole() {
+ var b = new WeakMap();
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
+
+(function() {
+ function store(x) { x[0] = 0; }
+ store([]);
+ var c = new WeakSet();
+ store(c);
+ function get_hole() {
+ var b = new WeakSet();
+ store(b);
+ return b[1];
+ }
+ assertEquals(undefined, get_hole());
+ assertEquals(undefined, get_hole());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js b/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
index 52c32e9cc3..ce5220a2b8 100644
--- a/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
+++ b/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --turbo-deoptimization
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js b/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js
new file mode 100644
index 0000000000..4ec2a6a799
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test push double as tagged.
+var a = [{}];
+function f(a) {
+ a.push(Infinity);
+}
+
+f(a);
+f(a);
+f(a);
+%OptimizeFunctionOnNextCall(f);
+f(a);
+assertEquals([{}, Infinity, Infinity, Infinity, Infinity], a);
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js b/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js
new file mode 100644
index 0000000000..303639c74f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o1 = {};
+var o2 = {};
+
+function foo(x) {
+ return x.bar;
+}
+
+Object.defineProperty(o1, "bar", {value:200});
+foo(o1);
+foo(o1);
+
+function f(b) {
+ var o = o2;
+ if (b) { return foo(o) }
+}
+
+f(false);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f(false));
+Object.defineProperty(o2, "bar", {value: 100});
+assertEquals(100, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-json-parse-index.js b/deps/v8/test/mjsunit/regress/regress-json-parse-index.js
new file mode 100644
index 0000000000..d1a785aaf1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-json-parse-index.js
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = JSON.parse('{"\\u0030":100}');
+assertEquals(100, o[0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js b/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
index c637be5497..5cbaabca55 100644
--- a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
@@ -27,6 +27,7 @@
// Flags: --expose-debug-as debug --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --turbo-deoptimization
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/regress/regress-reset-dictionary-elements.js b/deps/v8/test/mjsunit/regress/regress-reset-dictionary-elements.js
new file mode 100644
index 0000000000..d3d093ec09
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-reset-dictionary-elements.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = [];
+a[10000] = 1;
+a.length = 0;
+a[1] = 1;
+a.length = 0;
+assertEquals(undefined, a[1]);
+
+var o = {};
+Object.freeze(o);
+assertEquals(undefined, o[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
index c85cf56e0c..1fd8d810b3 100644
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
@@ -47,7 +47,7 @@ function StringFromCharCode(code) {
var two_byte = %NewString(n - i, false);
for (var j = 0; i < n; i++, j++) {
var code = %_Arguments(i);
- %_TwoByteSeqStringSetChar(two_byte, j, code);
+ %_TwoByteSeqStringSetChar(j, code, two_byte);
}
return one_byte + two_byte;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
index 43d2b08352..0a6b211648 100644
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
@@ -30,8 +30,8 @@
function test() {
var string = %NewString(10, true);
for (var i = 0; i < 10; i++) {
- %_OneByteSeqStringSetChar(string, i, 65);
- %_OneByteSeqStringSetChar(string, i, 66);
+ %_OneByteSeqStringSetChar(i, 65, string);
+ %_OneByteSeqStringSetChar(i, 66, string);
}
for (var i = 0; i < 10; i++) {
assertEquals("B", string[i]);
diff --git a/deps/v8/test/mjsunit/regress/regress-sliced-external-cons-regexp.js b/deps/v8/test/mjsunit/regress/regress-sliced-external-cons-regexp.js
new file mode 100644
index 0000000000..145c831896
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-sliced-external-cons-regexp.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-externalize-string --expose-gc
+
+var re = /(B)/;
+var cons1 = "0123456789" + "ABCDEFGHIJ";
+var cons2 = "0123456789\u1234" + "ABCDEFGHIJ";
+gc();
+gc(); // Promote cons.
+
+try { externalizeString(cons1, false); } catch (e) { }
+try { externalizeString(cons2, true); } catch (e) { }
+
+var slice1 = cons1.slice(1,-1);
+var slice2 = cons2.slice(1,-1);
+for (var i = 0; i < 10; i++) {
+ assertEquals(["B", "B"], re.exec(slice1));
+ assertEquals(["B", "B"], re.exec(slice2));
+}
diff --git a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
new file mode 100644
index 0000000000..45f47343ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
@@ -0,0 +1,7 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals(-1, %StringCompare("abc\u0102", "abc\u0201"));
diff --git a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
index 9f6d434538..c8e8538e16 100644
--- a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
+++ b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbo-deoptimization
(function OneByteSeqStringSetCharDeoptOsr() {
function deopt() {
@@ -34,7 +34,7 @@
function f(string, osr) {
var world = " world";
- %_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48));
+ %_OneByteSeqStringSetChar(0, (deopt(), 0x48), string);
if (osr) while (%GetOptimizationStatus(f) == 2) {}
@@ -56,7 +56,7 @@
}
function f(string) {
- g(%_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+ g(%_OneByteSeqStringSetChar(0, (deopt(), 0x48), string));
return string;
}
@@ -75,7 +75,7 @@
}
function f(string) {
- g(%_TwoByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+ g(%_TwoByteSeqStringSetChar(0, (deopt(), 0x48), string));
return string;
}
diff --git a/deps/v8/test/mjsunit/runtime-gen/apply.js b/deps/v8/test/mjsunit/runtime-gen/apply.js
deleted file mode 100644
index 94c4753cb9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/apply.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = function() {};
-var _receiver = new Object();
-var _arguments = new Object();
-var _offset = 1;
-var _argc = 1;
-%Apply(arg0, _receiver, _arguments, _offset, _argc);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybuffergetbytelength.js b/deps/v8/test/mjsunit/runtime-gen/arraybuffergetbytelength.js
deleted file mode 100644
index 8aff9ac073..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybuffergetbytelength.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new ArrayBuffer(8);
-%ArrayBufferGetByteLength(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybufferinitialize.js b/deps/v8/test/mjsunit/runtime-gen/arraybufferinitialize.js
deleted file mode 100644
index c4520c6a64..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybufferinitialize.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new ArrayBuffer(8);
-var _byteLength = 1.5;
-%ArrayBufferInitialize(_holder, _byteLength);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybufferisview.js b/deps/v8/test/mjsunit/runtime-gen/arraybufferisview.js
deleted file mode 100644
index 46cc5ba995..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybufferisview.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%ArrayBufferIsView(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybufferneuter.js b/deps/v8/test/mjsunit/runtime-gen/arraybufferneuter.js
deleted file mode 100644
index 89e9ee96b7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybufferneuter.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _array_buffer = new ArrayBuffer(8);
-%ArrayBufferNeuter(_array_buffer);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybuffersliceimpl.js b/deps/v8/test/mjsunit/runtime-gen/arraybuffersliceimpl.js
deleted file mode 100644
index cb02bb069c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybuffersliceimpl.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _source = new ArrayBuffer(8);
-var _target = new ArrayBuffer(8);
-var arg2 = 0;
-%ArrayBufferSliceImpl(_source, _target, arg2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js b/deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js
deleted file mode 100644
index e32ea0d4e7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Int32Array(2);
-%ArrayBufferViewGetByteLength(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js b/deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js
deleted file mode 100644
index 4c64ff206d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Int32Array(2);
-%ArrayBufferViewGetByteOffset(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/arrayconcat.js b/deps/v8/test/mjsunit/runtime-gen/arrayconcat.js
deleted file mode 100644
index 09487a6073..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/arrayconcat.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = [1, 'a'];
-%ArrayConcat(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/availablelocalesof.js b/deps/v8/test/mjsunit/runtime-gen/availablelocalesof.js
deleted file mode 100644
index a59c9b077c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/availablelocalesof.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _service = "foo";
-%AvailableLocalesOf(_service);
diff --git a/deps/v8/test/mjsunit/runtime-gen/basicjsonstringify.js b/deps/v8/test/mjsunit/runtime-gen/basicjsonstringify.js
deleted file mode 100644
index 55d197831e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/basicjsonstringify.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%BasicJSONStringify(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/booleanize.js b/deps/v8/test/mjsunit/runtime-gen/booleanize.js
deleted file mode 100644
index 8685368e4f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/booleanize.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _value_raw = new Object();
-var _token_raw = 1;
-%Booleanize(_value_raw, _token_raw);
diff --git a/deps/v8/test/mjsunit/runtime-gen/boundfunctiongetbindings.js b/deps/v8/test/mjsunit/runtime-gen/boundfunctiongetbindings.js
deleted file mode 100644
index 9221d3dd28..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/boundfunctiongetbindings.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _callable = new Object();
-%BoundFunctionGetBindings(_callable);
diff --git a/deps/v8/test/mjsunit/runtime-gen/break.js b/deps/v8/test/mjsunit/runtime-gen/break.js
deleted file mode 100644
index 4b600d8e3d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/break.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%Break();
diff --git a/deps/v8/test/mjsunit/runtime-gen/breakiteratoradopttext.js b/deps/v8/test/mjsunit/runtime-gen/breakiteratoradopttext.js
deleted file mode 100644
index 64b6059da3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/breakiteratoradopttext.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-var _text = "foo";
-%BreakIteratorAdoptText(arg0, _text);
diff --git a/deps/v8/test/mjsunit/runtime-gen/breakiteratorbreaktype.js b/deps/v8/test/mjsunit/runtime-gen/breakiteratorbreaktype.js
deleted file mode 100644
index 08cceb87f8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/breakiteratorbreaktype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorBreakType(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/breakiteratorcurrent.js b/deps/v8/test/mjsunit/runtime-gen/breakiteratorcurrent.js
deleted file mode 100644
index 42000a846c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/breakiteratorcurrent.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorCurrent(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/breakiteratorfirst.js b/deps/v8/test/mjsunit/runtime-gen/breakiteratorfirst.js
deleted file mode 100644
index 3fad88c9e3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/breakiteratorfirst.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorFirst(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/breakiteratornext.js b/deps/v8/test/mjsunit/runtime-gen/breakiteratornext.js
deleted file mode 100644
index be72ffc1d6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/breakiteratornext.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorNext(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/canonicalizelanguagetag.js b/deps/v8/test/mjsunit/runtime-gen/canonicalizelanguagetag.js
deleted file mode 100644
index 45df230a40..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/canonicalizelanguagetag.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _locale_id_str = "foo";
-%CanonicalizeLanguageTag(_locale_id_str);
diff --git a/deps/v8/test/mjsunit/runtime-gen/changebreakonexception.js b/deps/v8/test/mjsunit/runtime-gen/changebreakonexception.js
deleted file mode 100644
index 4bc0d43d01..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/changebreakonexception.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _type_arg = 32;
-var _enable = true;
-%ChangeBreakOnException(_type_arg, _enable);
diff --git a/deps/v8/test/mjsunit/runtime-gen/charfromcode.js b/deps/v8/test/mjsunit/runtime-gen/charfromcode.js
deleted file mode 100644
index 20823391da..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/charfromcode.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _code = 32;
-%CharFromCode(_code);
diff --git a/deps/v8/test/mjsunit/runtime-gen/checkexecutionstate.js b/deps/v8/test/mjsunit/runtime-gen/checkexecutionstate.js
deleted file mode 100644
index 7e740c39f6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/checkexecutionstate.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-try {
-%CheckExecutionState(_break_id);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/checkisbootstrapping.js b/deps/v8/test/mjsunit/runtime-gen/checkisbootstrapping.js
deleted file mode 100644
index 114b20c1c8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/checkisbootstrapping.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-try {
-%CheckIsBootstrapping();
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/clearbreakpoint.js b/deps/v8/test/mjsunit/runtime-gen/clearbreakpoint.js
deleted file mode 100644
index 1c11bc8f74..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/clearbreakpoint.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_point_object_arg = new Object();
-%ClearBreakPoint(_break_point_object_arg);
diff --git a/deps/v8/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js b/deps/v8/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js
deleted file mode 100644
index f42b8da200..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-%ClearFunctionTypeFeedback(_function);
diff --git a/deps/v8/test/mjsunit/runtime-gen/clearstepping.js b/deps/v8/test/mjsunit/runtime-gen/clearstepping.js
deleted file mode 100644
index bfab2cde0b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/clearstepping.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%ClearStepping();
diff --git a/deps/v8/test/mjsunit/runtime-gen/collectstacktrace.js b/deps/v8/test/mjsunit/runtime-gen/collectstacktrace.js
deleted file mode 100644
index bac9b6a66c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/collectstacktrace.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _error_object = new Object();
-var _caller = new Object();
-%CollectStackTrace(_error_object, _caller);
diff --git a/deps/v8/test/mjsunit/runtime-gen/compilestring.js b/deps/v8/test/mjsunit/runtime-gen/compilestring.js
deleted file mode 100644
index 659afcaaef..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/compilestring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _source = "foo";
-var arg1 = false;
-%CompileString(_source, arg1);
diff --git a/deps/v8/test/mjsunit/runtime-gen/constructdouble.js b/deps/v8/test/mjsunit/runtime-gen/constructdouble.js
deleted file mode 100644
index 9ac3dee9c0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/constructdouble.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _hi = 32;
-var _lo = 32;
-%ConstructDouble(_hi, _lo);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createbreakiterator.js b/deps/v8/test/mjsunit/runtime-gen/createbreakiterator.js
deleted file mode 100644
index a8750b3399..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createbreakiterator.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = 'en-US';
-var arg1 = {type: 'string'};
-var _resolved = new Object();
-%CreateBreakIterator(arg0, arg1, _resolved);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createcollator.js b/deps/v8/test/mjsunit/runtime-gen/createcollator.js
deleted file mode 100644
index 0d5b18d55d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createcollator.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _locale = "foo";
-var _options = new Object();
-var _resolved = new Object();
-%CreateCollator(_locale, _options, _resolved);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createglobalprivatesymbol.js b/deps/v8/test/mjsunit/runtime-gen/createglobalprivatesymbol.js
deleted file mode 100644
index e4968c14f3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createglobalprivatesymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _name = "foo";
-%CreateGlobalPrivateSymbol(_name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createjsfunctionproxy.js b/deps/v8/test/mjsunit/runtime-gen/createjsfunctionproxy.js
deleted file mode 100644
index b4e1c31ae8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createjsfunctionproxy.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _handler = new Object();
-var arg1 = function() {};
-var _construct_trap = function() {};
-var _prototype = new Object();
-%CreateJSFunctionProxy(_handler, arg1, _construct_trap, _prototype);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createjsproxy.js b/deps/v8/test/mjsunit/runtime-gen/createjsproxy.js
deleted file mode 100644
index ecdef60223..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createjsproxy.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _handler = new Object();
-var _prototype = new Object();
-%CreateJSProxy(_handler, _prototype);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createprivateownsymbol.js b/deps/v8/test/mjsunit/runtime-gen/createprivateownsymbol.js
deleted file mode 100644
index 74548287c1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createprivateownsymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = "foo";
-%CreatePrivateOwnSymbol(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createprivatesymbol.js b/deps/v8/test/mjsunit/runtime-gen/createprivatesymbol.js
deleted file mode 100644
index bbd99c12b8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createprivatesymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = "foo";
-%CreatePrivateSymbol(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/createsymbol.js b/deps/v8/test/mjsunit/runtime-gen/createsymbol.js
deleted file mode 100644
index 8452b9c90b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/createsymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = "foo";
-%CreateSymbol(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetbuffer.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetbuffer.js
deleted file mode 100644
index 84bab807f3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetbuffer.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-%DataViewGetBuffer(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat32.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat32.js
deleted file mode 100644
index 57f3c2a596..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat32.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetFloat32(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat64.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat64.js
deleted file mode 100644
index 7f80c5b0a0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetfloat64.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetFloat64(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetint16.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetint16.js
deleted file mode 100644
index e618c1c00a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetint16.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetInt16(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetint32.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetint32.js
deleted file mode 100644
index 2395a6dd9c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetint32.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetInt32(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetint8.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetint8.js
deleted file mode 100644
index fe92ed7c35..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetint8.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetInt8(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint16.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint16.js
deleted file mode 100644
index 50be62b009..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint16.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetUint16(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint32.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint32.js
deleted file mode 100644
index 2f85aeef8a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint32.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetUint32(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint8.js b/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint8.js
deleted file mode 100644
index 6a682e1731..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewgetuint8.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetUint8(_holder, _offset, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewinitialize.js b/deps/v8/test/mjsunit/runtime-gen/dataviewinitialize.js
deleted file mode 100644
index 167d531562..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewinitialize.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _buffer = new ArrayBuffer(8);
-var _byte_offset = 1.5;
-var _byte_length = 1.5;
-%DataViewInitialize(_holder, _buffer, _byte_offset, _byte_length);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat32.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat32.js
deleted file mode 100644
index 46d00afff0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat32.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetFloat32(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat64.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat64.js
deleted file mode 100644
index c57b514dd0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetfloat64.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetFloat64(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetint16.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetint16.js
deleted file mode 100644
index 1f45448f69..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetint16.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetInt16(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetint32.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetint32.js
deleted file mode 100644
index 837d4f26d5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetint32.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetInt32(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetint8.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetint8.js
deleted file mode 100644
index 725e658ec4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetint8.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetInt8(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint16.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint16.js
deleted file mode 100644
index d1b1a24bcd..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint16.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetUint16(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint32.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint32.js
deleted file mode 100644
index e46c8f302a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint32.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetUint32(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint8.js b/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint8.js
deleted file mode 100644
index 6c36723082..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dataviewsetuint8.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetUint8(_holder, _offset, _value, _is_little_endian);
diff --git a/deps/v8/test/mjsunit/runtime-gen/datecacheversion.js b/deps/v8/test/mjsunit/runtime-gen/datecacheversion.js
deleted file mode 100644
index ea56c73c74..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/datecacheversion.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%DateCacheVersion();
diff --git a/deps/v8/test/mjsunit/runtime-gen/datecurrenttime.js b/deps/v8/test/mjsunit/runtime-gen/datecurrenttime.js
deleted file mode 100644
index 759ebd0038..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/datecurrenttime.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%DateCurrentTime();
diff --git a/deps/v8/test/mjsunit/runtime-gen/datelocaltimezone.js b/deps/v8/test/mjsunit/runtime-gen/datelocaltimezone.js
deleted file mode 100644
index bfc1a81c7f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/datelocaltimezone.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%DateLocalTimezone(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/datemakeday.js b/deps/v8/test/mjsunit/runtime-gen/datemakeday.js
deleted file mode 100644
index 3d2334f51e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/datemakeday.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _year = 1;
-var _month = 1;
-%DateMakeDay(_year, _month);
diff --git a/deps/v8/test/mjsunit/runtime-gen/dateparsestring.js b/deps/v8/test/mjsunit/runtime-gen/dateparsestring.js
deleted file mode 100644
index fdf5faa7e9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/dateparsestring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _str = "foo";
-var arg1 = new Array(8);
-%DateParseString(_str, arg1);
diff --git a/deps/v8/test/mjsunit/runtime-gen/datesetvalue.js b/deps/v8/test/mjsunit/runtime-gen/datesetvalue.js
deleted file mode 100644
index dac1a36447..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/datesetvalue.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _date = new Date();
-var _time = 1.5;
-var _is_utc = 1;
-%DateSetValue(_date, _time, _is_utc);
diff --git a/deps/v8/test/mjsunit/runtime-gen/datetoutc.js b/deps/v8/test/mjsunit/runtime-gen/datetoutc.js
deleted file mode 100644
index f46644e951..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/datetoutc.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%DateToUTC(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugasynctaskevent.js b/deps/v8/test/mjsunit/runtime-gen/debugasynctaskevent.js
deleted file mode 100644
index ceeaf13774..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugasynctaskevent.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _data = new Object();
-%DebugAsyncTaskEvent(_data);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugbreak.js b/deps/v8/test/mjsunit/runtime-gen/debugbreak.js
deleted file mode 100644
index 68220dfa9b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugbreak.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%DebugBreak();
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js b/deps/v8/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js
deleted file mode 100644
index b683be0aa4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _callback = new Object();
-%DebugCallbackSupportsStepping(_callback);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugconstructedby.js b/deps/v8/test/mjsunit/runtime-gen/debugconstructedby.js
deleted file mode 100644
index 885034429b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugconstructedby.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _constructor = function() {};
-var _max_references = 32;
-%DebugConstructedBy(_constructor, _max_references);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugdisassembleconstructor.js b/deps/v8/test/mjsunit/runtime-gen/debugdisassembleconstructor.js
deleted file mode 100644
index c2faca4f0c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugdisassembleconstructor.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _func = function() {};
-%DebugDisassembleConstructor(_func);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugdisassemblefunction.js b/deps/v8/test/mjsunit/runtime-gen/debugdisassemblefunction.js
deleted file mode 100644
index f65886779d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugdisassemblefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _func = function() {};
-%DebugDisassembleFunction(_func);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugevaluate.js b/deps/v8/test/mjsunit/runtime-gen/debugevaluate.js
deleted file mode 100644
index 60e1e63fd0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugevaluate.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _source = "foo";
-var _disable_break = true;
-var _context_extension = new Object();
-try {
-%DebugEvaluate(_break_id, _wrapped_id, _inlined_jsframe_index, _source, _disable_break, _context_extension);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugevaluateglobal.js b/deps/v8/test/mjsunit/runtime-gen/debugevaluateglobal.js
deleted file mode 100644
index 11411d1992..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugevaluateglobal.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _source = "foo";
-var _disable_break = true;
-var _context_extension = new Object();
-try {
-%DebugEvaluateGlobal(_break_id, _source, _disable_break, _context_extension);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/debuggetproperty.js b/deps/v8/test/mjsunit/runtime-gen/debuggetproperty.js
deleted file mode 100644
index 90109d1dc8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debuggetproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _name = "name";
-%DebugGetProperty(_obj, _name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debuggetpropertydetails.js b/deps/v8/test/mjsunit/runtime-gen/debuggetpropertydetails.js
deleted file mode 100644
index 0fe2f3104f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debuggetpropertydetails.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _name = "name";
-%DebugGetPropertyDetails(_obj, _name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debuggetprototype.js b/deps/v8/test/mjsunit/runtime-gen/debuggetprototype.js
deleted file mode 100644
index 27de855b7b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debuggetprototype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%DebugGetPrototype(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js b/deps/v8/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js
deleted file mode 100644
index 22d24eead9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _index = 32;
-try {
-%DebugIndexedInterceptorElementValue(_obj, _index);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js b/deps/v8/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js
deleted file mode 100644
index 13641d2c2b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _name = "name";
-try {
-%DebugNamedInterceptorPropertyValue(_obj, _name);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpoppromise.js b/deps/v8/test/mjsunit/runtime-gen/debugpoppromise.js
deleted file mode 100644
index 9b81b13705..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpoppromise.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%DebugPopPromise();
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js b/deps/v8/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js
deleted file mode 100644
index a6061e6f98..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _callback = function() {};
-%DebugPrepareStepInIfStepping(_callback);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugprintscopes.js b/deps/v8/test/mjsunit/runtime-gen/debugprintscopes.js
deleted file mode 100644
index 2f106ddb6a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugprintscopes.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%DebugPrintScopes();
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpromiseevent.js b/deps/v8/test/mjsunit/runtime-gen/debugpromiseevent.js
deleted file mode 100644
index 20ae13c67a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpromiseevent.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _data = new Object();
-%DebugPromiseEvent(_data);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpromiserejectevent.js b/deps/v8/test/mjsunit/runtime-gen/debugpromiserejectevent.js
deleted file mode 100644
index 4e6e633426..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpromiserejectevent.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _promise = new Object();
-var _value = new Object();
-%DebugPromiseRejectEvent(_promise, _value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js b/deps/v8/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js
deleted file mode 100644
index 7802a35242..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _details = 513;
-%DebugPropertyAttributesFromDetails(_details);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js b/deps/v8/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js
deleted file mode 100644
index 02edeeee24..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _details = 513;
-%DebugPropertyIndexFromDetails(_details);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js b/deps/v8/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js
deleted file mode 100644
index 551ff2c621..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _details = 513;
-%DebugPropertyTypeFromDetails(_details);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugpushpromise.js b/deps/v8/test/mjsunit/runtime-gen/debugpushpromise.js
deleted file mode 100644
index 350a61354a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugpushpromise.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _promise = new Object();
-%DebugPushPromise(_promise);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugreferencedby.js b/deps/v8/test/mjsunit/runtime-gen/debugreferencedby.js
deleted file mode 100644
index 94e1242793..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugreferencedby.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _target = new Object();
-var _instance_filter = new Object();
-var _max_references = 32;
-%DebugReferencedBy(_target, _instance_filter, _max_references);
diff --git a/deps/v8/test/mjsunit/runtime-gen/debugtrace.js b/deps/v8/test/mjsunit/runtime-gen/debugtrace.js
deleted file mode 100644
index 2933ad114d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/debugtrace.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%DebugTrace();
diff --git a/deps/v8/test/mjsunit/runtime-gen/defineaccessorpropertyunchecked.js b/deps/v8/test/mjsunit/runtime-gen/defineaccessorpropertyunchecked.js
deleted file mode 100644
index c6cbb91cc7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/defineaccessorpropertyunchecked.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _name = "name";
-var arg2 = function() {};
-var arg3 = function() {};
-var arg4 = 2;
-%DefineAccessorPropertyUnchecked(_obj, _name, arg2, arg3, arg4);
diff --git a/deps/v8/test/mjsunit/runtime-gen/defineapiaccessorproperty.js b/deps/v8/test/mjsunit/runtime-gen/defineapiaccessorproperty.js
deleted file mode 100644
index 856a53129e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/defineapiaccessorproperty.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _name = "name";
-var arg2 = undefined;
-var arg3 = undefined;
-var _attribute = 1;
-%DefineApiAccessorProperty(_object, _name, arg2, arg3, _attribute);
diff --git a/deps/v8/test/mjsunit/runtime-gen/definedatapropertyunchecked.js b/deps/v8/test/mjsunit/runtime-gen/definedatapropertyunchecked.js
deleted file mode 100644
index cb0f07f600..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/definedatapropertyunchecked.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _js_object = new Object();
-var _name = "name";
-var _obj_value = new Object();
-var _unchecked = 1;
-%DefineDataPropertyUnchecked(_js_object, _name, _obj_value, _unchecked);
diff --git a/deps/v8/test/mjsunit/runtime-gen/deleteproperty.js b/deps/v8/test/mjsunit/runtime-gen/deleteproperty.js
deleted file mode 100644
index 66a882b1ab..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/deleteproperty.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _key = "name";
-var _strict_mode = 1;
-%DeleteProperty(_object, _key, _strict_mode);
diff --git a/deps/v8/test/mjsunit/runtime-gen/deoptimizefunction.js b/deps/v8/test/mjsunit/runtime-gen/deoptimizefunction.js
deleted file mode 100644
index ec5db2ddae..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/deoptimizefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-%DeoptimizeFunction(_function);
diff --git a/deps/v8/test/mjsunit/runtime-gen/doublehi.js b/deps/v8/test/mjsunit/runtime-gen/doublehi.js
deleted file mode 100644
index ac945dcd28..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/doublehi.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%DoubleHi(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/doublelo.js b/deps/v8/test/mjsunit/runtime-gen/doublelo.js
deleted file mode 100644
index 42c4c25495..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/doublelo.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%DoubleLo(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/enqueuemicrotask.js b/deps/v8/test/mjsunit/runtime-gen/enqueuemicrotask.js
deleted file mode 100644
index 2f21667613..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/enqueuemicrotask.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _microtask = function() {};
-%EnqueueMicrotask(_microtask);
diff --git a/deps/v8/test/mjsunit/runtime-gen/estimatenumberofelements.js b/deps/v8/test/mjsunit/runtime-gen/estimatenumberofelements.js
deleted file mode 100644
index cf3b9b606f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/estimatenumberofelements.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _array = new Array();
-%EstimateNumberOfElements(_array);
diff --git a/deps/v8/test/mjsunit/runtime-gen/executeindebugcontext.js b/deps/v8/test/mjsunit/runtime-gen/executeindebugcontext.js
deleted file mode 100644
index 18bfac9b53..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/executeindebugcontext.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-var _without_debugger = true;
-%ExecuteInDebugContext(_function, _without_debugger);
diff --git a/deps/v8/test/mjsunit/runtime-gen/finisharrayprototypesetup.js b/deps/v8/test/mjsunit/runtime-gen/finisharrayprototypesetup.js
deleted file mode 100644
index e4e8eabab4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/finisharrayprototypesetup.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _prototype = new Array();
-%FinishArrayPrototypeSetup(_prototype);
diff --git a/deps/v8/test/mjsunit/runtime-gen/fix.js b/deps/v8/test/mjsunit/runtime-gen/fix.js
deleted file mode 100644
index 010d2bcb70..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/fix.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _proxy = Proxy.create({});
-%Fix(_proxy);
diff --git a/deps/v8/test/mjsunit/runtime-gen/flattenstring.js b/deps/v8/test/mjsunit/runtime-gen/flattenstring.js
deleted file mode 100644
index 3f0b38d6c8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/flattenstring.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _str = "foo";
-%FlattenString(_str);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionbindarguments.js b/deps/v8/test/mjsunit/runtime-gen/functionbindarguments.js
deleted file mode 100644
index 4d36716253..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionbindarguments.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _bound_function = function() {};
-var _bindee = new Object();
-var arg2 = undefined;
-var _new_length = 1.5;
-%FunctionBindArguments(_bound_function, _bindee, arg2, _new_length);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functiongetinferredname.js b/deps/v8/test/mjsunit/runtime-gen/functiongetinferredname.js
deleted file mode 100644
index 8d765007cb..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functiongetinferredname.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionGetInferredName(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functiongetname.js b/deps/v8/test/mjsunit/runtime-gen/functiongetname.js
deleted file mode 100644
index ad23b11a69..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functiongetname.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionGetName(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functiongetscript.js b/deps/v8/test/mjsunit/runtime-gen/functiongetscript.js
deleted file mode 100644
index bd4364447e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functiongetscript.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-%FunctionGetScript(_fun);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js b/deps/v8/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js
deleted file mode 100644
index eb462f96f7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-%FunctionGetScriptSourcePosition(_fun);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functiongetsourcecode.js b/deps/v8/test/mjsunit/runtime-gen/functiongetsourcecode.js
deleted file mode 100644
index b9de88a15d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functiongetsourcecode.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionGetSourceCode(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionisapifunction.js b/deps/v8/test/mjsunit/runtime-gen/functionisapifunction.js
deleted file mode 100644
index 7fb8a21e0a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionisapifunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionIsAPIFunction(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionisarrow.js b/deps/v8/test/mjsunit/runtime-gen/functionisarrow.js
deleted file mode 100644
index 08410b49dd..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionisarrow.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = () => null;
-%FunctionIsArrow(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionisbuiltin.js b/deps/v8/test/mjsunit/runtime-gen/functionisbuiltin.js
deleted file mode 100644
index a8dd6c6a88..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionisbuiltin.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionIsBuiltin(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionisgenerator.js b/deps/v8/test/mjsunit/runtime-gen/functionisgenerator.js
deleted file mode 100644
index 8be6aab2a7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionisgenerator.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionIsGenerator(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js b/deps/v8/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js
deleted file mode 100644
index 74f18e258c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionMarkNameShouldPrintAsAnonymous(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js b/deps/v8/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js
deleted file mode 100644
index aa5bcddc18..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionNameShouldPrintAsAnonymous(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionremoveprototype.js b/deps/v8/test/mjsunit/runtime-gen/functionremoveprototype.js
deleted file mode 100644
index a7ec5f52a9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionremoveprototype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-%FunctionRemovePrototype(_f);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionsetinstanceclassname.js b/deps/v8/test/mjsunit/runtime-gen/functionsetinstanceclassname.js
deleted file mode 100644
index 6986a15b1c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionsetinstanceclassname.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-var _name = "foo";
-%FunctionSetInstanceClassName(_fun, _name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionsetlength.js b/deps/v8/test/mjsunit/runtime-gen/functionsetlength.js
deleted file mode 100644
index 5582e82cf2..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionsetlength.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-var _length = 1;
-%FunctionSetLength(_fun, _length);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionsetname.js b/deps/v8/test/mjsunit/runtime-gen/functionsetname.js
deleted file mode 100644
index 0d44b20317..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionsetname.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _f = function() {};
-var _name = "foo";
-%FunctionSetName(_f, _name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/functionsetprototype.js b/deps/v8/test/mjsunit/runtime-gen/functionsetprototype.js
deleted file mode 100644
index eb69ea8f5b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/functionsetprototype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-var _value = new Object();
-%FunctionSetPrototype(_fun, _value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getallscopesdetails.js b/deps/v8/test/mjsunit/runtime-gen/getallscopesdetails.js
deleted file mode 100644
index 97ad7cb538..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getallscopesdetails.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _flag = true;
-try {
-%GetAllScopesDetails(_break_id, _wrapped_id, _inlined_jsframe_index, _flag);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getargumentsproperty.js b/deps/v8/test/mjsunit/runtime-gen/getargumentsproperty.js
deleted file mode 100644
index 646e56be9f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getargumentsproperty.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _raw_key = new Object();
-%GetArgumentsProperty(_raw_key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getarraykeys.js b/deps/v8/test/mjsunit/runtime-gen/getarraykeys.js
deleted file mode 100644
index 341faa69ec..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getarraykeys.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _array = new Object();
-var _length = 32;
-%GetArrayKeys(_array, _length);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getbreaklocations.js b/deps/v8/test/mjsunit/runtime-gen/getbreaklocations.js
deleted file mode 100644
index d31fa15c51..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getbreaklocations.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-var arg1 = 0;
-%GetBreakLocations(_fun, arg1);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getcalltrap.js b/deps/v8/test/mjsunit/runtime-gen/getcalltrap.js
deleted file mode 100644
index 406af9ffd9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getcalltrap.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _proxy = Proxy.createFunction({}, function() {});
-%GetCallTrap(_proxy);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getconstructordelegate.js b/deps/v8/test/mjsunit/runtime-gen/getconstructordelegate.js
deleted file mode 100644
index 6d01415667..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getconstructordelegate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%GetConstructorDelegate(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getconstructtrap.js b/deps/v8/test/mjsunit/runtime-gen/getconstructtrap.js
deleted file mode 100644
index 116d301eb3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getconstructtrap.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _proxy = Proxy.createFunction({}, function() {});
-%GetConstructTrap(_proxy);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getdataproperty.js b/deps/v8/test/mjsunit/runtime-gen/getdataproperty.js
deleted file mode 100644
index 59cfba56d9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getdataproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _key = "name";
-%GetDataProperty(_object, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getdefaulticulocale.js b/deps/v8/test/mjsunit/runtime-gen/getdefaulticulocale.js
deleted file mode 100644
index 920f256683..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getdefaulticulocale.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%GetDefaultICULocale();
diff --git a/deps/v8/test/mjsunit/runtime-gen/getdefaultreceiver.js b/deps/v8/test/mjsunit/runtime-gen/getdefaultreceiver.js
deleted file mode 100644
index 1d5b1cb44c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getdefaultreceiver.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = function() {};
-%GetDefaultReceiver(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getframecount.js b/deps/v8/test/mjsunit/runtime-gen/getframecount.js
deleted file mode 100644
index a958efcd7f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getframecount.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-try {
-%GetFrameCount(_break_id);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getframedetails.js b/deps/v8/test/mjsunit/runtime-gen/getframedetails.js
deleted file mode 100644
index 1138424845..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getframedetails.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _index = 32;
-try {
-%GetFrameDetails(_break_id, _index);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js b/deps/v8/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js
deleted file mode 100644
index 473b263241..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-var _source_position = 32;
-%GetFunctionCodePositionFromSource(_function, _source_position);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getfunctiondelegate.js b/deps/v8/test/mjsunit/runtime-gen/getfunctiondelegate.js
deleted file mode 100644
index 4d02ec2194..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getfunctiondelegate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%GetFunctionDelegate(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getfunctionscopecount.js b/deps/v8/test/mjsunit/runtime-gen/getfunctionscopecount.js
deleted file mode 100644
index fb854cff42..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getfunctionscopecount.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-%GetFunctionScopeCount(_fun);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getfunctionscopedetails.js b/deps/v8/test/mjsunit/runtime-gen/getfunctionscopedetails.js
deleted file mode 100644
index c24314003a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getfunctionscopedetails.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-var _index = 32;
-%GetFunctionScopeDetails(_fun, _index);
diff --git a/deps/v8/test/mjsunit/runtime-gen/gethandler.js b/deps/v8/test/mjsunit/runtime-gen/gethandler.js
deleted file mode 100644
index ea982cbb51..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/gethandler.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _proxy = Proxy.create({});
-%GetHandler(_proxy);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getheapusage.js b/deps/v8/test/mjsunit/runtime-gen/getheapusage.js
deleted file mode 100644
index cb174b72f2..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getheapusage.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%GetHeapUsage();
diff --git a/deps/v8/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js b/deps/v8/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js
deleted file mode 100644
index 899ba8859e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = new Intl.NumberFormat('en-US');
-%GetImplFromInitializedIntlObject(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js b/deps/v8/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js
deleted file mode 100644
index 8a83f0acd6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%GetIndexedInterceptorElementNames(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getinterceptorinfo.js b/deps/v8/test/mjsunit/runtime-gen/getinterceptorinfo.js
deleted file mode 100644
index b33ba64916..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getinterceptorinfo.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%GetInterceptorInfo(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getlanguagetagvariants.js b/deps/v8/test/mjsunit/runtime-gen/getlanguagetagvariants.js
deleted file mode 100644
index 0ecfee522c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getlanguagetagvariants.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _input = new Array();
-%GetLanguageTagVariants(_input);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js b/deps/v8/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js
deleted file mode 100644
index 0dee531be6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%GetNamedInterceptorPropertyNames(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js b/deps/v8/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js
deleted file mode 100644
index 2960acee45..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object_info = new Object();
-%GetObjectContextNotifierPerformChange(_object_info);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js b/deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js
deleted file mode 100644
index d6a043061e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%GetObjectContextObjectGetNotifier(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js b/deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js
deleted file mode 100644
index f1669e7385..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%GetObjectContextObjectObserve(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getobservationstate.js b/deps/v8/test/mjsunit/runtime-gen/getobservationstate.js
deleted file mode 100644
index 429cdcd91f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getobservationstate.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%GetObservationState();
diff --git a/deps/v8/test/mjsunit/runtime-gen/getoptimizationcount.js b/deps/v8/test/mjsunit/runtime-gen/getoptimizationcount.js
deleted file mode 100644
index da1ab9efcc..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getoptimizationcount.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-%GetOptimizationCount(_function);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getownelementnames.js b/deps/v8/test/mjsunit/runtime-gen/getownelementnames.js
deleted file mode 100644
index 54d9a69855..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getownelementnames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%GetOwnElementNames(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getownproperty.js b/deps/v8/test/mjsunit/runtime-gen/getownproperty.js
deleted file mode 100644
index 1e5a808f71..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getownproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _name = "name";
-%GetOwnProperty(_obj, _name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getownpropertynames.js b/deps/v8/test/mjsunit/runtime-gen/getownpropertynames.js
deleted file mode 100644
index 10f7f2c776..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getownpropertynames.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _filter_value = 1;
-%GetOwnPropertyNames(_obj, _filter_value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getproperty.js b/deps/v8/test/mjsunit/runtime-gen/getproperty.js
deleted file mode 100644
index 569189a3aa..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _key = new Object();
-%GetProperty(_object, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getpropertynames.js b/deps/v8/test/mjsunit/runtime-gen/getpropertynames.js
deleted file mode 100644
index ad94eedc9c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getpropertynames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%GetPropertyNames(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getpropertynamesfast.js b/deps/v8/test/mjsunit/runtime-gen/getpropertynamesfast.js
deleted file mode 100644
index c2d14cb653..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getpropertynamesfast.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _raw_object = new Object();
-%GetPropertyNamesFast(_raw_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getprototype.js b/deps/v8/test/mjsunit/runtime-gen/getprototype.js
deleted file mode 100644
index b9ef1f9912..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getprototype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%GetPrototype(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getrootnan.js b/deps/v8/test/mjsunit/runtime-gen/getrootnan.js
deleted file mode 100644
index b6df0fd5fb..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getrootnan.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-try {
-%GetRootNaN();
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getscopecount.js b/deps/v8/test/mjsunit/runtime-gen/getscopecount.js
deleted file mode 100644
index d53bece37c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getscopecount.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _wrapped_id = 1;
-try {
-%GetScopeCount(_break_id, _wrapped_id);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getscopedetails.js b/deps/v8/test/mjsunit/runtime-gen/getscopedetails.js
deleted file mode 100644
index 4ea28ac73e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getscopedetails.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _index = 32;
-try {
-%GetScopeDetails(_break_id, _wrapped_id, _inlined_jsframe_index, _index);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getscript.js b/deps/v8/test/mjsunit/runtime-gen/getscript.js
deleted file mode 100644
index cae0087ccf..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getscript.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _script_name = "foo";
-%GetScript(_script_name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getstepinpositions.js b/deps/v8/test/mjsunit/runtime-gen/getstepinpositions.js
deleted file mode 100644
index 221c586ed4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getstepinpositions.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _wrapped_id = 1;
-try {
-%GetStepInPositions(_break_id, _wrapped_id);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/gettemplatefield.js b/deps/v8/test/mjsunit/runtime-gen/gettemplatefield.js
deleted file mode 100644
index 16d3824b2d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/gettemplatefield.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _templ = new Object();
-var _index = 1;
-try {
-%GetTemplateField(_templ, _index);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getthreadcount.js b/deps/v8/test/mjsunit/runtime-gen/getthreadcount.js
deleted file mode 100644
index 5037066a7d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getthreadcount.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-try {
-%GetThreadCount(_break_id);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getthreaddetails.js b/deps/v8/test/mjsunit/runtime-gen/getthreaddetails.js
deleted file mode 100644
index 6fc0d14ce4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getthreaddetails.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _break_id = 32;
-var _index = 32;
-try {
-%GetThreadDetails(_break_id, _index);
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/getv8version.js b/deps/v8/test/mjsunit/runtime-gen/getv8version.js
deleted file mode 100644
index e311eef139..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getv8version.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%GetV8Version();
diff --git a/deps/v8/test/mjsunit/runtime-gen/getweakmapentries.js b/deps/v8/test/mjsunit/runtime-gen/getweakmapentries.js
deleted file mode 100644
index ced728d3b5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getweakmapentries.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new WeakMap();
-%GetWeakMapEntries(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/getweaksetvalues.js b/deps/v8/test/mjsunit/runtime-gen/getweaksetvalues.js
deleted file mode 100644
index 650c947d07..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/getweaksetvalues.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new WeakMap();
-%GetWeakSetValues(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/globalprint.js b/deps/v8/test/mjsunit/runtime-gen/globalprint.js
deleted file mode 100644
index 059f08efe2..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/globalprint.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _string = "foo";
-%GlobalPrint(_string);
diff --git a/deps/v8/test/mjsunit/runtime-gen/globalproxy.js b/deps/v8/test/mjsunit/runtime-gen/globalproxy.js
deleted file mode 100644
index 80e500c887..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/globalproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _global = new Object();
-%GlobalProxy(_global);
diff --git a/deps/v8/test/mjsunit/runtime-gen/haselement.js b/deps/v8/test/mjsunit/runtime-gen/haselement.js
deleted file mode 100644
index 3d32ac5f00..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/haselement.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _receiver = new Object();
-var _index = 1;
-%HasElement(_receiver, _index);
diff --git a/deps/v8/test/mjsunit/runtime-gen/hasownproperty.js b/deps/v8/test/mjsunit/runtime-gen/hasownproperty.js
deleted file mode 100644
index 7443bff104..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/hasownproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _key = "name";
-%HasOwnProperty(_object, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/hasproperty.js b/deps/v8/test/mjsunit/runtime-gen/hasproperty.js
deleted file mode 100644
index df4de8eb34..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/hasproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _receiver = new Object();
-var _key = "name";
-%HasProperty(_receiver, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/havesamemap.js b/deps/v8/test/mjsunit/runtime-gen/havesamemap.js
deleted file mode 100644
index b399d17cb7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/havesamemap.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj1 = new Object();
-var _obj2 = new Object();
-%HaveSameMap(_obj1, _obj2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/internalcompare.js b/deps/v8/test/mjsunit/runtime-gen/internalcompare.js
deleted file mode 100644
index 95cc006f31..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/internalcompare.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.Collator('en-US'));
-var _string1 = "foo";
-var _string2 = "foo";
-%InternalCompare(arg0, _string1, _string2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/internaldateformat.js b/deps/v8/test/mjsunit/runtime-gen/internaldateformat.js
deleted file mode 100644
index 933714e934..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/internaldateformat.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'));
-var _date = new Date();
-%InternalDateFormat(arg0, _date);
diff --git a/deps/v8/test/mjsunit/runtime-gen/internaldateparse.js b/deps/v8/test/mjsunit/runtime-gen/internaldateparse.js
deleted file mode 100644
index be8c49a942..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/internaldateparse.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'));
-var _date_string = "foo";
-%InternalDateParse(arg0, _date_string);
diff --git a/deps/v8/test/mjsunit/runtime-gen/internalnumberformat.js b/deps/v8/test/mjsunit/runtime-gen/internalnumberformat.js
deleted file mode 100644
index cd21edc247..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/internalnumberformat.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'));
-var _number = new Object();
-%InternalNumberFormat(arg0, _number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/internalnumberparse.js b/deps/v8/test/mjsunit/runtime-gen/internalnumberparse.js
deleted file mode 100644
index cdbd322c4c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/internalnumberparse.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'));
-var _number_string = "foo";
-%InternalNumberParse(arg0, _number_string);
diff --git a/deps/v8/test/mjsunit/runtime-gen/internalsetprototype.js b/deps/v8/test/mjsunit/runtime-gen/internalsetprototype.js
deleted file mode 100644
index 1bc67d3826..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/internalsetprototype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _prototype = new Object();
-%InternalSetPrototype(_obj, _prototype);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isattachedglobal.js b/deps/v8/test/mjsunit/runtime-gen/isattachedglobal.js
deleted file mode 100644
index 9ead91a408..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isattachedglobal.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _global = new Object();
-%IsAttachedGlobal(_global);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isbreakonexception.js b/deps/v8/test/mjsunit/runtime-gen/isbreakonexception.js
deleted file mode 100644
index e55c7d030a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isbreakonexception.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _type_arg = 32;
-%IsBreakOnException(_type_arg);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js b/deps/v8/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js
deleted file mode 100644
index 44e2917d72..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%IsConcurrentRecompilationSupported();
diff --git a/deps/v8/test/mjsunit/runtime-gen/isextensible.js b/deps/v8/test/mjsunit/runtime-gen/isextensible.js
deleted file mode 100644
index 20a7c8d8a4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isextensible.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%IsExtensible(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isinitializedintlobject.js b/deps/v8/test/mjsunit/runtime-gen/isinitializedintlobject.js
deleted file mode 100644
index 2816e5e27a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isinitializedintlobject.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _input = new Object();
-%IsInitializedIntlObject(_input);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js b/deps/v8/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js
deleted file mode 100644
index 60e3850082..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _input = new Object();
-var _expected_type = "foo";
-%IsInitializedIntlObjectOfType(_input, _expected_type);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isinprototypechain.js b/deps/v8/test/mjsunit/runtime-gen/isinprototypechain.js
deleted file mode 100644
index 37048348d1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isinprototypechain.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _O = new Object();
-var _V = new Object();
-%IsInPrototypeChain(_O, _V);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isjsfunctionproxy.js b/deps/v8/test/mjsunit/runtime-gen/isjsfunctionproxy.js
deleted file mode 100644
index ca6ea5a916..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isjsfunctionproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%IsJSFunctionProxy(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isjsglobalproxy.js b/deps/v8/test/mjsunit/runtime-gen/isjsglobalproxy.js
deleted file mode 100644
index f0de610155..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isjsglobalproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%IsJSGlobalProxy(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isjsmodule.js b/deps/v8/test/mjsunit/runtime-gen/isjsmodule.js
deleted file mode 100644
index 8b43a729fb..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isjsmodule.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%IsJSModule(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isjsproxy.js b/deps/v8/test/mjsunit/runtime-gen/isjsproxy.js
deleted file mode 100644
index a4d32beb16..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isjsproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%IsJSProxy(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isobserved.js b/deps/v8/test/mjsunit/runtime-gen/isobserved.js
deleted file mode 100644
index f649a1b33e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isobserved.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%IsObserved(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isoptimized.js b/deps/v8/test/mjsunit/runtime-gen/isoptimized.js
deleted file mode 100644
index e1daf0da88..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isoptimized.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%IsOptimized();
diff --git a/deps/v8/test/mjsunit/runtime-gen/ispropertyenumerable.js b/deps/v8/test/mjsunit/runtime-gen/ispropertyenumerable.js
deleted file mode 100644
index 575ee3468c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/ispropertyenumerable.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _key = "name";
-%IsPropertyEnumerable(_object, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/issloppymodefunction.js b/deps/v8/test/mjsunit/runtime-gen/issloppymodefunction.js
deleted file mode 100644
index a0c75b32df..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/issloppymodefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = function() {};
-%IsSloppyModeFunction(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/istemplate.js b/deps/v8/test/mjsunit/runtime-gen/istemplate.js
deleted file mode 100644
index 421229fe6e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/istemplate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _arg = new Object();
-%IsTemplate(_arg);
diff --git a/deps/v8/test/mjsunit/runtime-gen/isvalidsmi.js b/deps/v8/test/mjsunit/runtime-gen/isvalidsmi.js
deleted file mode 100644
index 98cf53bb2d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/isvalidsmi.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _number = 32;
-%IsValidSmi(_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/keyedgetproperty.js b/deps/v8/test/mjsunit/runtime-gen/keyedgetproperty.js
deleted file mode 100644
index cd8473c99a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/keyedgetproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _receiver_obj = new Object();
-var _key_obj = new Object();
-%KeyedGetProperty(_receiver_obj, _key_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js b/deps/v8/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js
deleted file mode 100644
index 7247acc3a7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _shared_array = new Array();
-var _do_drop = true;
-%LiveEditCheckAndDropActivations(_shared_array, _do_drop);
diff --git a/deps/v8/test/mjsunit/runtime-gen/liveeditcomparestrings.js b/deps/v8/test/mjsunit/runtime-gen/liveeditcomparestrings.js
deleted file mode 100644
index 611d78b03c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/liveeditcomparestrings.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _s1 = "foo";
-var _s2 = "foo";
-%LiveEditCompareStrings(_s1, _s2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js b/deps/v8/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js
deleted file mode 100644
index 51d61d3bc7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function_object = new Object();
-var _script_object = new Object();
-%LiveEditFunctionSetScript(_function_object, _script_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/lookupaccessor.js b/deps/v8/test/mjsunit/runtime-gen/loadfromsuper.js
index 89f40d76c9..25f4ff9ac8 100644
--- a/deps/v8/test/mjsunit/runtime-gen/lookupaccessor.js
+++ b/deps/v8/test/mjsunit/runtime-gen/loadfromsuper.js
@@ -1,7 +1,7 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
// Flags: --allow-natives-syntax --harmony --harmony-proxies
+var _home_object = new Object();
var _receiver = new Object();
var _name = "name";
-var _flag = 1;
-%LookupAccessor(_receiver, _name, _flag);
+%LoadFromSuper(_home_object, _receiver, _name);
diff --git a/deps/v8/test/mjsunit/runtime-gen/loadmutabledouble.js b/deps/v8/test/mjsunit/runtime-gen/loadmutabledouble.js
deleted file mode 100644
index 1a2e7e9f90..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/loadmutabledouble.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = {foo: 1.2};
-var _index = 1;
-%LoadMutableDouble(arg0, _index);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapclear.js b/deps/v8/test/mjsunit/runtime-gen/mapclear.js
deleted file mode 100644
index b34e694514..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapclear.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-%MapClear(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapdelete.js b/deps/v8/test/mjsunit/runtime-gen/mapdelete.js
deleted file mode 100644
index ab78954427..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapdelete.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-var _key = new Object();
-%MapDelete(_holder, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapget.js b/deps/v8/test/mjsunit/runtime-gen/mapget.js
deleted file mode 100644
index 0e996f5232..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapget.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-var _key = new Object();
-%MapGet(_holder, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapgetsize.js b/deps/v8/test/mjsunit/runtime-gen/mapgetsize.js
deleted file mode 100644
index 50a06044b4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapgetsize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-%MapGetSize(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/maphas.js b/deps/v8/test/mjsunit/runtime-gen/maphas.js
deleted file mode 100644
index 2dc70c93e3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/maphas.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-var _key = new Object();
-%MapHas(_holder, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapinitialize.js b/deps/v8/test/mjsunit/runtime-gen/mapinitialize.js
deleted file mode 100644
index 6240a02594..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapinitialize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-%MapInitialize(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapiteratorinitialize.js b/deps/v8/test/mjsunit/runtime-gen/mapiteratorinitialize.js
deleted file mode 100644
index 584fe18a4d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapiteratorinitialize.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map().entries();
-var _map = new Map();
-var _kind = 1;
-%MapIteratorInitialize(_holder, _map, _kind);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapiteratornext.js b/deps/v8/test/mjsunit/runtime-gen/mapiteratornext.js
deleted file mode 100644
index e155227023..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapiteratornext.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map().entries();
-var _value_array = new Array();
-%MapIteratorNext(_holder, _value_array);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mapset.js b/deps/v8/test/mjsunit/runtime-gen/mapset.js
deleted file mode 100644
index 32c2080a8d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mapset.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Map();
-var _key = new Object();
-var _value = new Object();
-%MapSet(_holder, _key, _value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js b/deps/v8/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js
deleted file mode 100644
index bd0c581c89..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _input = new Object();
-var _type = "foo";
-var _impl = new Object();
-%MarkAsInitializedIntlObjectOfType(_input, _type, _impl);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathacos.js b/deps/v8/test/mjsunit/runtime-gen/mathacos.js
deleted file mode 100644
index fa44268389..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathacos.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathAcos(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathasin.js b/deps/v8/test/mjsunit/runtime-gen/mathasin.js
deleted file mode 100644
index 0d20b3108d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathasin.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathAsin(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathatan.js b/deps/v8/test/mjsunit/runtime-gen/mathatan.js
deleted file mode 100644
index 0e2708f1f2..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathatan.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathAtan(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathatan2.js b/deps/v8/test/mjsunit/runtime-gen/mathatan2.js
deleted file mode 100644
index 4294797115..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathatan2.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%MathAtan2(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathexprt.js b/deps/v8/test/mjsunit/runtime-gen/mathexprt.js
deleted file mode 100644
index e4584366de..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathexprt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathExpRT(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathfloorrt.js b/deps/v8/test/mjsunit/runtime-gen/mathfloorrt.js
deleted file mode 100644
index 2ae83aab52..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathfloorrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathFloorRT(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathfround.js b/deps/v8/test/mjsunit/runtime-gen/mathfround.js
deleted file mode 100644
index 10a92986c1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathfround.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathFround(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathlogrt.js b/deps/v8/test/mjsunit/runtime-gen/mathlogrt.js
deleted file mode 100644
index 5c484cbbb1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathlogrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathLogRT(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/mathsqrtrt.js b/deps/v8/test/mjsunit/runtime-gen/mathsqrtrt.js
deleted file mode 100644
index e0df8d72d5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/mathsqrtrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%MathSqrtRT(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/maxsmi.js b/deps/v8/test/mjsunit/runtime-gen/maxsmi.js
deleted file mode 100644
index 717a6544eb..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/maxsmi.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%MaxSmi();
diff --git a/deps/v8/test/mjsunit/runtime-gen/movearraycontents.js b/deps/v8/test/mjsunit/runtime-gen/movearraycontents.js
deleted file mode 100644
index 41c4ee1cd3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/movearraycontents.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _from = new Array();
-var _to = new Array();
-%MoveArrayContents(_from, _to);
diff --git a/deps/v8/test/mjsunit/runtime-gen/neveroptimizefunction.js b/deps/v8/test/mjsunit/runtime-gen/neveroptimizefunction.js
deleted file mode 100644
index b03e42f1f8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/neveroptimizefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-%NeverOptimizeFunction(_function);
diff --git a/deps/v8/test/mjsunit/runtime-gen/newarguments.js b/deps/v8/test/mjsunit/runtime-gen/newarguments.js
deleted file mode 100644
index 908fc3af7c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/newarguments.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _callee = function() {};
-%NewArguments(_callee);
diff --git a/deps/v8/test/mjsunit/runtime-gen/newobjectfrombound.js b/deps/v8/test/mjsunit/runtime-gen/newobjectfrombound.js
deleted file mode 100644
index 36f75077b6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/newobjectfrombound.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = (function() {}).bind({});
-%NewObjectFromBound(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/newstring.js b/deps/v8/test/mjsunit/runtime-gen/newstring.js
deleted file mode 100644
index 24b01489e5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/newstring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _length = 1;
-var _is_one_byte = true;
-%NewString(_length, _is_one_byte);
diff --git a/deps/v8/test/mjsunit/runtime-gen/newstringwrapper.js b/deps/v8/test/mjsunit/runtime-gen/newstringwrapper.js
deleted file mode 100644
index cf53a3af20..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/newstringwrapper.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _value = "foo";
-%NewStringWrapper(_value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/newsymbolwrapper.js b/deps/v8/test/mjsunit/runtime-gen/newsymbolwrapper.js
deleted file mode 100644
index 08c0ea7e60..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/newsymbolwrapper.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _symbol = Symbol("symbol");
-%NewSymbolWrapper(_symbol);
diff --git a/deps/v8/test/mjsunit/runtime-gen/notifycontextdisposed.js b/deps/v8/test/mjsunit/runtime-gen/notifycontextdisposed.js
deleted file mode 100644
index d353fc5cea..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/notifycontextdisposed.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%NotifyContextDisposed();
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberadd.js b/deps/v8/test/mjsunit/runtime-gen/numberadd.js
deleted file mode 100644
index f85017d49d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberadd.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%NumberAdd(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberand.js b/deps/v8/test/mjsunit/runtime-gen/numberand.js
deleted file mode 100644
index 9635e11bb6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberand.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberAnd(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbercompare.js b/deps/v8/test/mjsunit/runtime-gen/numbercompare.js
deleted file mode 100644
index 5f7ac9363c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbercompare.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-var _uncomparable_result = new Object();
-%NumberCompare(_x, _y, _uncomparable_result);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberdiv.js b/deps/v8/test/mjsunit/runtime-gen/numberdiv.js
deleted file mode 100644
index c62d5921c7..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberdiv.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%NumberDiv(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberequals.js b/deps/v8/test/mjsunit/runtime-gen/numberequals.js
deleted file mode 100644
index 3b919fc02f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberequals.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%NumberEquals(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberimul.js b/deps/v8/test/mjsunit/runtime-gen/numberimul.js
deleted file mode 100644
index f3c98bdc28..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberimul.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberImul(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbermod.js b/deps/v8/test/mjsunit/runtime-gen/numbermod.js
deleted file mode 100644
index 6d5faeb2c5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbermod.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%NumberMod(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbermul.js b/deps/v8/test/mjsunit/runtime-gen/numbermul.js
deleted file mode 100644
index 0bdc7c2378..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbermul.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%NumberMul(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberor.js b/deps/v8/test/mjsunit/runtime-gen/numberor.js
deleted file mode 100644
index c5ac65fc8d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberor.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberOr(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbersar.js b/deps/v8/test/mjsunit/runtime-gen/numbersar.js
deleted file mode 100644
index 639270a08a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbersar.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberSar(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbershl.js b/deps/v8/test/mjsunit/runtime-gen/numbershl.js
deleted file mode 100644
index b505ff6ed8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbershl.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberShl(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbershr.js b/deps/v8/test/mjsunit/runtime-gen/numbershr.js
deleted file mode 100644
index bd1a3c4541..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbershr.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberShr(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbersub.js b/deps/v8/test/mjsunit/runtime-gen/numbersub.js
deleted file mode 100644
index 5c99f872fa..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbersub.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-var _y = 1.5;
-%NumberSub(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertoexponential.js b/deps/v8/test/mjsunit/runtime-gen/numbertoexponential.js
deleted file mode 100644
index 30159bb3ad..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertoexponential.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _value = 1.5;
-var _f_number = 1.5;
-%NumberToExponential(_value, _f_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertofixed.js b/deps/v8/test/mjsunit/runtime-gen/numbertofixed.js
deleted file mode 100644
index 0df152541a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertofixed.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _value = 1.5;
-var _f_number = 1.5;
-%NumberToFixed(_value, _f_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertointeger.js b/deps/v8/test/mjsunit/runtime-gen/numbertointeger.js
deleted file mode 100644
index eada58f45a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertointeger.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _number = 1.5;
-%NumberToInteger(_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertointegermapminuszero.js b/deps/v8/test/mjsunit/runtime-gen/numbertointegermapminuszero.js
deleted file mode 100644
index ce32480610..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertointegermapminuszero.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _number = 1.5;
-%NumberToIntegerMapMinusZero(_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertojsint32.js b/deps/v8/test/mjsunit/runtime-gen/numbertojsint32.js
deleted file mode 100644
index 77321f9c62..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertojsint32.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _number = 1.5;
-%NumberToJSInt32(_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertojsuint32.js b/deps/v8/test/mjsunit/runtime-gen/numbertojsuint32.js
deleted file mode 100644
index d4f7302fe9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertojsuint32.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _number = 32;
-%NumberToJSUint32(_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertoprecision.js b/deps/v8/test/mjsunit/runtime-gen/numbertoprecision.js
deleted file mode 100644
index 6591117ec8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertoprecision.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _value = 1.5;
-var _f_number = 1.5;
-%NumberToPrecision(_value, _f_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertoradixstring.js b/deps/v8/test/mjsunit/runtime-gen/numbertoradixstring.js
deleted file mode 100644
index 020aac2853..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertoradixstring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _value = 1.5;
-var arg1 = 2;
-%NumberToRadixString(_value, arg1);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numbertostringrt.js b/deps/v8/test/mjsunit/runtime-gen/numbertostringrt.js
deleted file mode 100644
index 4b2b6d93b0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numbertostringrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _number = 1.5;
-%NumberToStringRT(_number);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberunaryminus.js b/deps/v8/test/mjsunit/runtime-gen/numberunaryminus.js
deleted file mode 100644
index 54dc49eda9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberunaryminus.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%NumberUnaryMinus(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/numberxor.js b/deps/v8/test/mjsunit/runtime-gen/numberxor.js
deleted file mode 100644
index 237269803b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/numberxor.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 32;
-var _y = 32;
-%NumberXor(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/objectfreeze.js b/deps/v8/test/mjsunit/runtime-gen/objectfreeze.js
deleted file mode 100644
index cfc066c6f1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/objectfreeze.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%ObjectFreeze(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js b/deps/v8/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js
deleted file mode 100644
index 776997009c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%ObjectWasCreatedInCurrentOrigin(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/observationweakmapcreate.js b/deps/v8/test/mjsunit/runtime-gen/observationweakmapcreate.js
deleted file mode 100644
index 6c71eace41..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/observationweakmapcreate.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%ObservationWeakMapCreate();
diff --git a/deps/v8/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js b/deps/v8/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js
deleted file mode 100644
index 6c251ecd95..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _observer = function() {};
-var _object = new Object();
-var _record = new Object();
-%ObserverObjectAndRecordHaveSameOrigin(_observer, _object, _record);
diff --git a/deps/v8/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js b/deps/v8/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js
deleted file mode 100644
index 7016e1c062..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _properties = 1;
-%OptimizeObjectForAddingMultipleProperties(_object, _properties);
diff --git a/deps/v8/test/mjsunit/runtime-gen/ownkeys.js b/deps/v8/test/mjsunit/runtime-gen/ownkeys.js
deleted file mode 100644
index 0a392422cc..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/ownkeys.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _raw_object = new Object();
-%OwnKeys(_raw_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/parsejson.js b/deps/v8/test/mjsunit/runtime-gen/parsejson.js
deleted file mode 100644
index 0a038790ea..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/parsejson.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = "{}";
-%ParseJson(arg0);
diff --git a/deps/v8/test/mjsunit/runtime-gen/preventextensions.js b/deps/v8/test/mjsunit/runtime-gen/preventextensions.js
deleted file mode 100644
index 8e24b75e0c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/preventextensions.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%PreventExtensions(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/pushifabsent.js b/deps/v8/test/mjsunit/runtime-gen/pushifabsent.js
deleted file mode 100644
index c998121f53..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/pushifabsent.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _array = new Array();
-var _element = new Object();
-%PushIfAbsent(_array, _element);
diff --git a/deps/v8/test/mjsunit/runtime-gen/quotejsonstring.js b/deps/v8/test/mjsunit/runtime-gen/quotejsonstring.js
deleted file mode 100644
index 61ade34263..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/quotejsonstring.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _string = "foo";
-%QuoteJSONString(_string);
diff --git a/deps/v8/test/mjsunit/runtime-gen/regexpcompile.js b/deps/v8/test/mjsunit/runtime-gen/regexpcompile.js
deleted file mode 100644
index c0edfa6fcf..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/regexpcompile.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _re = /ab/g;
-var _pattern = "foo";
-var _flags = "foo";
-%RegExpCompile(_re, _pattern, _flags);
diff --git a/deps/v8/test/mjsunit/runtime-gen/regexpconstructresult.js b/deps/v8/test/mjsunit/runtime-gen/regexpconstructresult.js
deleted file mode 100644
index 50d2e0d8fe..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/regexpconstructresult.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _size = 1;
-var _index = new Object();
-var _input = new Object();
-%_RegExpConstructResult(_size, _index, _input);
diff --git a/deps/v8/test/mjsunit/runtime-gen/regexpexecmultiple.js b/deps/v8/test/mjsunit/runtime-gen/regexpexecmultiple.js
deleted file mode 100644
index 9db6e6d2b3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/regexpexecmultiple.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _regexp = /ab/g;
-var _subject = "foo";
-var arg2 = ['a'];
-var arg3 = ['a'];
-%RegExpExecMultiple(_regexp, _subject, arg2, arg3);
diff --git a/deps/v8/test/mjsunit/runtime-gen/regexpexecrt.js b/deps/v8/test/mjsunit/runtime-gen/regexpexecrt.js
deleted file mode 100644
index 3b20191f2b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/regexpexecrt.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _regexp = /ab/g;
-var _subject = "foo";
-var _index = 1;
-var _last_match_info = new Array();
-%RegExpExecRT(_regexp, _subject, _index, _last_match_info);
diff --git a/deps/v8/test/mjsunit/runtime-gen/regexpinitializeobject.js b/deps/v8/test/mjsunit/runtime-gen/regexpinitializeobject.js
deleted file mode 100644
index fccdeeed78..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/regexpinitializeobject.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _regexp = /ab/g;
-var _source = "foo";
-var _global = new Object();
-var _ignoreCase = new Object();
-var _multiline = new Object();
-%RegExpInitializeObject(_regexp, _source, _global, _ignoreCase, _multiline);
diff --git a/deps/v8/test/mjsunit/runtime-gen/removearrayholes.js b/deps/v8/test/mjsunit/runtime-gen/removearrayholes.js
deleted file mode 100644
index 971e63cab5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/removearrayholes.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-var _limit = 32;
-%RemoveArrayHoles(_object, _limit);
diff --git a/deps/v8/test/mjsunit/runtime-gen/rempio2.js b/deps/v8/test/mjsunit/runtime-gen/rempio2.js
deleted file mode 100644
index 6d47bac4ac..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/rempio2.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = 1.5;
-%RemPiO2(_x);
diff --git a/deps/v8/test/mjsunit/runtime-gen/roundnumber.js b/deps/v8/test/mjsunit/runtime-gen/roundnumber.js
deleted file mode 100644
index 2ec1159b2b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/roundnumber.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _input = 1.5;
-%RoundNumber(_input);
diff --git a/deps/v8/test/mjsunit/runtime-gen/runmicrotasks.js b/deps/v8/test/mjsunit/runtime-gen/runmicrotasks.js
deleted file mode 100644
index 945260a8df..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/runmicrotasks.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%RunMicrotasks();
diff --git a/deps/v8/test/mjsunit/runtime-gen/runninginsimulator.js b/deps/v8/test/mjsunit/runtime-gen/runninginsimulator.js
deleted file mode 100644
index fe5678259d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/runninginsimulator.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%RunningInSimulator();
diff --git a/deps/v8/test/mjsunit/runtime-gen/setadd.js b/deps/v8/test/mjsunit/runtime-gen/setadd.js
deleted file mode 100644
index 75b923fbf3..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setadd.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set();
-var _key = new Object();
-%SetAdd(_holder, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setclear.js b/deps/v8/test/mjsunit/runtime-gen/setclear.js
deleted file mode 100644
index 82ef6d955b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setclear.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set();
-%SetClear(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setcode.js b/deps/v8/test/mjsunit/runtime-gen/setcode.js
deleted file mode 100644
index 4e2206fbc8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setcode.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _target = function() {};
-var _source = function() {};
-%SetCode(_target, _source);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setdebugeventlistener.js b/deps/v8/test/mjsunit/runtime-gen/setdebugeventlistener.js
deleted file mode 100644
index d51b277b80..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setdebugeventlistener.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = undefined;
-var _data = new Object();
-%SetDebugEventListener(arg0, _data);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setdelete.js b/deps/v8/test/mjsunit/runtime-gen/setdelete.js
deleted file mode 100644
index 80bd343d0e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setdelete.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set();
-var _key = new Object();
-%SetDelete(_holder, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setdisablebreak.js b/deps/v8/test/mjsunit/runtime-gen/setdisablebreak.js
deleted file mode 100644
index 461942b60f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setdisablebreak.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _disable_break = true;
-%SetDisableBreak(_disable_break);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setflags.js b/deps/v8/test/mjsunit/runtime-gen/setflags.js
deleted file mode 100644
index 70db03ee98..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setflags.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _arg = "foo";
-%SetFlags(_arg);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setfunctionbreakpoint.js b/deps/v8/test/mjsunit/runtime-gen/setfunctionbreakpoint.js
deleted file mode 100644
index 010330e5a4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setfunctionbreakpoint.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _function = function() {};
-var arg1 = 218;
-var _break_point_object_arg = new Object();
-%SetFunctionBreakPoint(_function, arg1, _break_point_object_arg);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setgetsize.js b/deps/v8/test/mjsunit/runtime-gen/setgetsize.js
deleted file mode 100644
index 842016bb2d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setgetsize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set();
-%SetGetSize(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/sethas.js b/deps/v8/test/mjsunit/runtime-gen/sethas.js
deleted file mode 100644
index 8cec0d8c35..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/sethas.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set();
-var _key = new Object();
-%SetHas(_holder, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setinitialize.js b/deps/v8/test/mjsunit/runtime-gen/setinitialize.js
deleted file mode 100644
index b21a089692..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setinitialize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set();
-%SetInitialize(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setisobserved.js b/deps/v8/test/mjsunit/runtime-gen/setisobserved.js
deleted file mode 100644
index d885113ffa..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setisobserved.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%SetIsObserved(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setiteratorinitialize.js b/deps/v8/test/mjsunit/runtime-gen/setiteratorinitialize.js
deleted file mode 100644
index 34769e51dc..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setiteratorinitialize.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set().values();
-var _set = new Set();
-var arg2 = 2;
-%SetIteratorInitialize(_holder, _set, arg2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setiteratornext.js b/deps/v8/test/mjsunit/runtime-gen/setiteratornext.js
deleted file mode 100644
index 02b74d44da..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setiteratornext.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Set().values();
-var _value_array = new Array();
-%SetIteratorNext(_holder, _value_array);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setprototype.js b/deps/v8/test/mjsunit/runtime-gen/setprototype.js
deleted file mode 100644
index 6353151f4e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setprototype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-var _prototype = new Object();
-%SetPrototype(_obj, _prototype);
diff --git a/deps/v8/test/mjsunit/runtime-gen/setscopevariablevalue.js b/deps/v8/test/mjsunit/runtime-gen/setscopevariablevalue.js
deleted file mode 100644
index 680bab52cc..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/setscopevariablevalue.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _fun = function() {};
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _index = 32;
-var _variable_name = "foo";
-var _new_value = new Object();
-%SetScopeVariableValue(_fun, _wrapped_id, _inlined_jsframe_index, _index, _variable_name, _new_value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/smilexicographiccompare.js b/deps/v8/test/mjsunit/runtime-gen/smilexicographiccompare.js
deleted file mode 100644
index d227a9ffc1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/smilexicographiccompare.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x_value = 1;
-var _y_value = 1;
-%SmiLexicographicCompare(_x_value, _y_value);
diff --git a/deps/v8/test/mjsunit/runtime-gen/sparsejoinwithseparator.js b/deps/v8/test/mjsunit/runtime-gen/sparsejoinwithseparator.js
deleted file mode 100644
index 3a8e7754d4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/sparsejoinwithseparator.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _elements_array = new Array();
-var _array_length = 32;
-var _separator = "foo";
-%SparseJoinWithSeparator(_elements_array, _array_length, _separator);
diff --git a/deps/v8/test/mjsunit/runtime-gen/specialarrayfunctions.js b/deps/v8/test/mjsunit/runtime-gen/specialarrayfunctions.js
deleted file mode 100644
index 5956e8422c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/specialarrayfunctions.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%SpecialArrayFunctions();
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringbuilderconcat.js b/deps/v8/test/mjsunit/runtime-gen/stringbuilderconcat.js
deleted file mode 100644
index 9d7c78a3e6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringbuilderconcat.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = [1, 2, 3];
-var arg1 = 3;
-var _special = "foo";
-%StringBuilderConcat(arg0, arg1, _special);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringbuilderjoin.js b/deps/v8/test/mjsunit/runtime-gen/stringbuilderjoin.js
deleted file mode 100644
index bf990c62d6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringbuilderjoin.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var arg0 = ['a', 'b'];
-var arg1 = 4;
-var _separator = "foo";
-%StringBuilderJoin(arg0, arg1, _separator);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringcharcodeatrt.js b/deps/v8/test/mjsunit/runtime-gen/stringcharcodeatrt.js
deleted file mode 100644
index fa016ac00e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringcharcodeatrt.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-var _i = 32;
-%StringCharCodeAtRT(_subject, _i);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringequals.js b/deps/v8/test/mjsunit/runtime-gen/stringequals.js
deleted file mode 100644
index 14e40eb028..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringequals.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _x = "foo";
-var _y = "foo";
-%StringEquals(_x, _y);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringindexof.js b/deps/v8/test/mjsunit/runtime-gen/stringindexof.js
deleted file mode 100644
index 3c5cab31c5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringindexof.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _sub = "foo";
-var _pat = "foo";
-var _index = new Object();
-%StringIndexOf(_sub, _pat, _index);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringlastindexof.js b/deps/v8/test/mjsunit/runtime-gen/stringlastindexof.js
deleted file mode 100644
index afbc51f5a4..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringlastindexof.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _sub = "foo";
-var _pat = "foo";
-var _index = new Object();
-%StringLastIndexOf(_sub, _pat, _index);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringlocalecompare.js b/deps/v8/test/mjsunit/runtime-gen/stringlocalecompare.js
deleted file mode 100644
index b37e231183..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringlocalecompare.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _str1 = "foo";
-var _str2 = "foo";
-%StringLocaleCompare(_str1, _str2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringmatch.js b/deps/v8/test/mjsunit/runtime-gen/stringmatch.js
deleted file mode 100644
index 330aeae9c0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringmatch.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-var _regexp = /ab/g;
-var arg2 = ['a', 'b'];
-%StringMatch(_subject, _regexp, arg2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringnormalize.js b/deps/v8/test/mjsunit/runtime-gen/stringnormalize.js
deleted file mode 100644
index fb408a41a5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringnormalize.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _stringValue = "foo";
-var arg1 = 2;
-%StringNormalize(_stringValue, arg1);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringparsefloat.js b/deps/v8/test/mjsunit/runtime-gen/stringparsefloat.js
deleted file mode 100644
index 520a24e756..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringparsefloat.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-%StringParseFloat(_subject);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringparseint.js b/deps/v8/test/mjsunit/runtime-gen/stringparseint.js
deleted file mode 100644
index 43116554eb..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringparseint.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-var _radix = 32;
-%StringParseInt(_subject, _radix);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js b/deps/v8/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js
deleted file mode 100644
index ad2b6e67d9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-var _regexp = /ab/g;
-var _replacement = "foo";
-var arg3 = ['a'];
-%StringReplaceGlobalRegExpWithString(_subject, _regexp, _replacement, arg3);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js b/deps/v8/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js
deleted file mode 100644
index 5e38a79f44..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-var _search = "foo";
-var _replace = "foo";
-%StringReplaceOneCharWithString(_subject, _search, _replace);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringsplit.js b/deps/v8/test/mjsunit/runtime-gen/stringsplit.js
deleted file mode 100644
index dfe683194a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringsplit.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-var _pattern = "foo";
-var _limit = 32;
-%StringSplit(_subject, _pattern, _limit);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringtoarray.js b/deps/v8/test/mjsunit/runtime-gen/stringtoarray.js
deleted file mode 100644
index 6ed48a771a..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringtoarray.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _s = "foo";
-var _limit = 32;
-%StringToArray(_s, _limit);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringtolowercase.js b/deps/v8/test/mjsunit/runtime-gen/stringtolowercase.js
deleted file mode 100644
index 3a7261a0e0..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringtolowercase.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _s = "foo";
-%StringToLowerCase(_s);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringtonumber.js b/deps/v8/test/mjsunit/runtime-gen/stringtonumber.js
deleted file mode 100644
index 88e2e84a2e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringtonumber.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _subject = "foo";
-%StringToNumber(_subject);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringtouppercase.js b/deps/v8/test/mjsunit/runtime-gen/stringtouppercase.js
deleted file mode 100644
index b7d9731015..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringtouppercase.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _s = "foo";
-%StringToUpperCase(_s);
diff --git a/deps/v8/test/mjsunit/runtime-gen/stringtrim.js b/deps/v8/test/mjsunit/runtime-gen/stringtrim.js
deleted file mode 100644
index 75d197efa9..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/stringtrim.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _string = "foo";
-var _trimLeft = true;
-var _trimRight = true;
-%StringTrim(_string, _trimLeft, _trimRight);
diff --git a/deps/v8/test/mjsunit/runtime-gen/symboldescription.js b/deps/v8/test/mjsunit/runtime-gen/symboldescription.js
deleted file mode 100644
index 13360828b8..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/symboldescription.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _symbol = Symbol("symbol");
-%SymbolDescription(_symbol);
diff --git a/deps/v8/test/mjsunit/runtime-gen/symbolisprivate.js b/deps/v8/test/mjsunit/runtime-gen/symbolisprivate.js
deleted file mode 100644
index 8e5343e1d5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/symbolisprivate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _symbol = Symbol("symbol");
-%SymbolIsPrivate(_symbol);
diff --git a/deps/v8/test/mjsunit/runtime-gen/symbolregistry.js b/deps/v8/test/mjsunit/runtime-gen/symbolregistry.js
deleted file mode 100644
index 71964e6eae..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/symbolregistry.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%SymbolRegistry();
diff --git a/deps/v8/test/mjsunit/runtime-gen/tobool.js b/deps/v8/test/mjsunit/runtime-gen/tobool.js
deleted file mode 100644
index ca522c8a9f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/tobool.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%ToBool(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/tofastproperties.js b/deps/v8/test/mjsunit/runtime-gen/tofastproperties.js
deleted file mode 100644
index f9c1890b1c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/tofastproperties.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%ToFastProperties(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/traceenter.js b/deps/v8/test/mjsunit/runtime-gen/traceenter.js
deleted file mode 100644
index 768a0c2437..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/traceenter.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%TraceEnter();
diff --git a/deps/v8/test/mjsunit/runtime-gen/traceexit.js b/deps/v8/test/mjsunit/runtime-gen/traceexit.js
deleted file mode 100644
index 378d008c90..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/traceexit.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%TraceExit(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/truncatestring.js b/deps/v8/test/mjsunit/runtime-gen/truncatestring.js
deleted file mode 100644
index 64ef628e5b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/truncatestring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _string = "seqstring";
-var _new_length = 1;
-%TruncateString(_string, _new_length);
diff --git a/deps/v8/test/mjsunit/runtime-gen/trymigrateinstance.js b/deps/v8/test/mjsunit/runtime-gen/trymigrateinstance.js
deleted file mode 100644
index b82eb741bb..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/trymigrateinstance.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _object = new Object();
-%TryMigrateInstance(_object);
diff --git a/deps/v8/test/mjsunit/runtime-gen/typedarraygetbuffer.js b/deps/v8/test/mjsunit/runtime-gen/typedarraygetbuffer.js
deleted file mode 100644
index 56a805b3b2..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typedarraygetbuffer.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Int32Array(2);
-%TypedArrayGetBuffer(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/typedarraygetlength.js b/deps/v8/test/mjsunit/runtime-gen/typedarraygetlength.js
deleted file mode 100644
index 8d1865f40f..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typedarraygetlength.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Int32Array(2);
-%TypedArrayGetLength(_holder);
diff --git a/deps/v8/test/mjsunit/runtime-gen/typedarrayinitialize.js b/deps/v8/test/mjsunit/runtime-gen/typedarrayinitialize.js
deleted file mode 100644
index be1e29607e..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typedarrayinitialize.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Int32Array(2);
-var arg1 = 6;
-var arg2 = new ArrayBuffer(8);
-var _byte_offset_object = 1.5;
-var arg4 = 4;
-%TypedArrayInitialize(_holder, arg1, arg2, _byte_offset_object, arg4);
diff --git a/deps/v8/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js b/deps/v8/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js
deleted file mode 100644
index 0ca7a0f7ce..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _holder = new Int32Array(2);
-var arg1 = 6;
-var _source = new Object();
-var _length_obj = 1.5;
-%TypedArrayInitializeFromArrayLike(_holder, arg1, _source, _length_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js b/deps/v8/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js
deleted file mode 100644
index 61467bd9fa..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-%TypedArrayMaxSizeInHeap();
diff --git a/deps/v8/test/mjsunit/runtime-gen/typedarraysetfastcases.js b/deps/v8/test/mjsunit/runtime-gen/typedarraysetfastcases.js
deleted file mode 100644
index 495212952b..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typedarraysetfastcases.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _target_obj = new Int32Array(2);
-var _source_obj = new Int32Array(2);
-var arg2 = 0;
-%TypedArraySetFastCases(_target_obj, _source_obj, arg2);
diff --git a/deps/v8/test/mjsunit/runtime-gen/typeof.js b/deps/v8/test/mjsunit/runtime-gen/typeof.js
deleted file mode 100644
index 78bfa6ea2c..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/typeof.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _obj = new Object();
-%Typeof(_obj);
diff --git a/deps/v8/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js b/deps/v8/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js
deleted file mode 100644
index a08add7b28..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-try {
-%UnblockConcurrentRecompilation();
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/runtime-gen/uriescape.js b/deps/v8/test/mjsunit/runtime-gen/uriescape.js
deleted file mode 100644
index f32edc98e6..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/uriescape.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _source = "foo";
-%URIEscape(_source);
diff --git a/deps/v8/test/mjsunit/runtime-gen/uriunescape.js b/deps/v8/test/mjsunit/runtime-gen/uriunescape.js
deleted file mode 100644
index 2ba812c588..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/uriunescape.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _source = "foo";
-%URIUnescape(_source);
diff --git a/deps/v8/test/mjsunit/runtime-gen/weakcollectiondelete.js b/deps/v8/test/mjsunit/runtime-gen/weakcollectiondelete.js
deleted file mode 100644
index a6fff79e19..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/weakcollectiondelete.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _weak_collection = new WeakMap();
-var _key = new Object();
-%WeakCollectionDelete(_weak_collection, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/weakcollectionget.js b/deps/v8/test/mjsunit/runtime-gen/weakcollectionget.js
deleted file mode 100644
index f248ac05a5..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/weakcollectionget.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _weak_collection = new WeakMap();
-var _key = new Object();
-%WeakCollectionGet(_weak_collection, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/weakcollectionhas.js b/deps/v8/test/mjsunit/runtime-gen/weakcollectionhas.js
deleted file mode 100644
index af600c3e8d..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/weakcollectionhas.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _weak_collection = new WeakMap();
-var _key = new Object();
-%WeakCollectionHas(_weak_collection, _key);
diff --git a/deps/v8/test/mjsunit/runtime-gen/weakcollectioninitialize.js b/deps/v8/test/mjsunit/runtime-gen/weakcollectioninitialize.js
deleted file mode 100644
index 97f5ce56a1..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/weakcollectioninitialize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _weak_collection = new WeakMap();
-%WeakCollectionInitialize(_weak_collection);
diff --git a/deps/v8/test/mjsunit/runtime-gen/weakcollectionset.js b/deps/v8/test/mjsunit/runtime-gen/weakcollectionset.js
deleted file mode 100644
index 3479ba6031..0000000000
--- a/deps/v8/test/mjsunit/runtime-gen/weakcollectionset.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _weak_collection = new WeakMap();
-var _key = new Object();
-var _value = new Object();
-%WeakCollectionSet(_weak_collection, _key, _value);
diff --git a/deps/v8/test/mjsunit/serialize-ic.js b/deps/v8/test/mjsunit/serialize-ic.js
new file mode 100644
index 0000000000..8f20b2758f
--- /dev/null
+++ b/deps/v8/test/mjsunit/serialize-ic.js
@@ -0,0 +1,9 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --cache=code --serialize-toplevel
+
+var foo = [];
+foo[0] = "bar";
+assertEquals(["bar"], foo);
diff --git a/deps/v8/test/mjsunit/string-external-cached.js b/deps/v8/test/mjsunit/string-external-cached.js
index 6e24285331..cd368f660a 100644
--- a/deps/v8/test/mjsunit/string-external-cached.js
+++ b/deps/v8/test/mjsunit/string-external-cached.js
@@ -68,8 +68,8 @@ function test() {
externalizeString(ascii, false);
externalizeString(twobyte, true);
} catch (ex) { }
- assertTrue(isAsciiString(ascii));
- assertFalse(isAsciiString(twobyte));
+ assertTrue(isOneByteString(ascii));
+ assertFalse(isOneByteString(twobyte));
var ascii_slice = ascii.slice(1,-1);
var twobyte_slice = twobyte.slice(2,-1);
var ascii_cons = ascii + ascii;
@@ -97,18 +97,18 @@ function test() {
externalizeString(long_ascii, false);
externalizeString(short_twobyte, true);
externalizeString(long_twobyte, true);
- assertTrue(isAsciiString(short_asii) && isAsciiString(long_ascii));
- assertFalse(isAsciiString(short_twobyte) || isAsciiString(long_twobyte));
+ assertTrue(isOneByteString(short_asii) && isOneByteString(long_ascii));
+ assertFalse(isOneByteString(short_twobyte) || isOneByteString(long_twobyte));
} catch (ex) { }
assertEquals("E=MCsquared", short_ascii + long_ascii);
- assertTrue(isAsciiString(short_ascii + long_ascii));
+ assertTrue(isOneByteString(short_ascii + long_ascii));
assertEquals("MCsquaredE=", long_ascii + short_ascii);
assertEquals("E\u1234MCsquare\u1234", short_twobyte + long_twobyte);
- assertFalse(isAsciiString(short_twobyte + long_twobyte));
+ assertFalse(isOneByteString(short_twobyte + long_twobyte));
assertEquals("E=MCsquared", "E=" + long_ascii);
assertEquals("E\u1234MCsquared", short_twobyte + "MCsquared");
assertEquals("E\u1234MCsquared", short_twobyte + long_ascii);
- assertFalse(isAsciiString(short_twobyte + long_ascii));
+ assertFalse(isOneByteString(short_twobyte + long_ascii));
}
// Run the test many times to ensure IC-s don't break things.
diff --git a/deps/v8/test/mjsunit/string-externalize.js b/deps/v8/test/mjsunit/string-externalize.js
index d52a7e2baf..39cc124914 100644
--- a/deps/v8/test/mjsunit/string-externalize.js
+++ b/deps/v8/test/mjsunit/string-externalize.js
@@ -36,27 +36,27 @@ function test() {
for (var i = 0; i < size; i++) {
str += String.fromCharCode(i & 0x7f);
}
- assertTrue(isAsciiString(str));
+ assertTrue(isOneByteString(str));
- var twoByteExternalWithAsciiData =
+ var twoByteExternalWithOneByteData =
"AA" + (function() { return "A"; })();
- externalizeString(twoByteExternalWithAsciiData, true /* force two-byte */);
- assertFalse(isAsciiString(twoByteExternalWithAsciiData));
+ externalizeString(twoByteExternalWithOneByteData, true /* force two-byte */);
+ assertFalse(isOneByteString(twoByteExternalWithOneByteData));
var realTwoByteExternalString =
"\u1234\u1234\u1234\u1234" + (function() { return "\u1234"; })();
externalizeString(realTwoByteExternalString);
- assertFalse(isAsciiString(realTwoByteExternalString));
+ assertFalse(isOneByteString(realTwoByteExternalString));
- assertTrue(isAsciiString(["a", twoByteExternalWithAsciiData].join("")));
+ assertTrue(isOneByteString(["a", twoByteExternalWithOneByteData].join("")));
// Appending a two-byte string that contains only ascii chars should
// still produce an ascii cons.
- var str1 = str + twoByteExternalWithAsciiData;
- assertTrue(isAsciiString(str1));
+ var str1 = str + twoByteExternalWithOneByteData;
+ assertTrue(isOneByteString(str1));
// Force flattening of the string.
- var old_length = str1.length - twoByteExternalWithAsciiData.length;
+ var old_length = str1.length - twoByteExternalWithOneByteData.length;
for (var i = 0; i < old_length; i++) {
assertEquals(String.fromCharCode(i & 0x7f), str1[i]);
}
@@ -65,16 +65,16 @@ function test() {
}
// Flattened string should still be ascii.
- assertTrue(isAsciiString(str1));
+ assertTrue(isOneByteString(str1));
// Lower-casing an ascii string should produce ascii.
- assertTrue(isAsciiString(str1.toLowerCase()));
+ assertTrue(isOneByteString(str1.toLowerCase()));
- assertFalse(isAsciiString(["a", realTwoByteExternalString].join("")));
+ assertFalse(isOneByteString(["a", realTwoByteExternalString].join("")));
// Appending a real two-byte string should produce a two-byte cons.
var str2 = str + realTwoByteExternalString;
- assertFalse(isAsciiString(str2));
+ assertFalse(isOneByteString(str2));
// Force flattening of the string.
old_length = str2.length - realTwoByteExternalString.length;
@@ -86,7 +86,7 @@ function test() {
}
// Flattened string should still be two-byte.
- assertFalse(isAsciiString(str2));
+ assertFalse(isOneByteString(str2));
}
// Run the test many times to ensure IC-s don't break things.
diff --git a/deps/v8/test/mjsunit/string-match.js b/deps/v8/test/mjsunit/string-match.js
index 202396d308..7689652cee 100644
--- a/deps/v8/test/mjsunit/string-match.js
+++ b/deps/v8/test/mjsunit/string-match.js
@@ -66,7 +66,6 @@ function testMatch(name, input, regexp, result, captures, from, to) {
assertEquals(undefined, RegExp.$10, name + "-nocapture-10");
assertEquals(input, RegExp.input, name + "-input");
- assertEquals(input, RegExp.$input, name + "-$input");
assertEquals(input, RegExp.$_, name + "-$_");
assertEquals(preMatch, RegExp["$`"], name + "-$`");
diff --git a/deps/v8/test/mjsunit/string-natives.js b/deps/v8/test/mjsunit/string-natives.js
index 7a9009bfd1..40fe9c697e 100644
--- a/deps/v8/test/mjsunit/string-natives.js
+++ b/deps/v8/test/mjsunit/string-natives.js
@@ -29,27 +29,27 @@
function test() {
var s1 = %NewString(26, true);
- for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, 65);
+ for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(i, 65, s1);
assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s1);
- %_OneByteSeqStringSetChar(s1, 25, 66);
+ %_OneByteSeqStringSetChar(25, 66, s1);
assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s1);
- for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, i+65);
+ for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(i, i+65, s1);
assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s1);
s1 = %TruncateString(s1, 13);
assertEquals("ABCDEFGHIJKLM", s1);
var s2 = %NewString(26, false);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, 65);
+ for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, 65, s2);
assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s2);
- %_TwoByteSeqStringSetChar(s2, 25, 66);
+ %_TwoByteSeqStringSetChar(25, 66, s2);
assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s2);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, i+65);
+ for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, i+65, s2);
assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s2);
s2 = %TruncateString(s2, 13);
assertEquals("ABCDEFGHIJKLM", s2);
var s3 = %NewString(26, false);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s3, i, i+1000);
+ for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, i+1000, s3);
for (i = 0; i < 26; i++) assertEquals(s3[i], String.fromCharCode(i+1000));
var a = [];
diff --git a/deps/v8/test/mjsunit/string-oom-concat.js b/deps/v8/test/mjsunit/string-oom-concat.js
index 9529c89381..0b35021c33 100644
--- a/deps/v8/test/mjsunit/string-oom-concat.js
+++ b/deps/v8/test/mjsunit/string-oom-concat.js
@@ -7,6 +7,7 @@ function concat() {
for (var i = 0; i < 100; i++) {
a += a;
}
+ return a;
}
assertThrows(concat, RangeError);
diff --git a/deps/v8/test/mjsunit/string-slices.js b/deps/v8/test/mjsunit/string-slices.js
index 2fec04b0b0..c3f889bd99 100644
--- a/deps/v8/test/mjsunit/string-slices.js
+++ b/deps/v8/test/mjsunit/string-slices.js
@@ -197,9 +197,9 @@ var a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
var b = "23456789qwertyuiopasdfghjklzxcvbn"
assertEquals(a.slice(1,-1), b);
-assertTrue(isAsciiString(a));
+assertTrue(isOneByteString(a));
externalizeString(a, true);
-assertFalse(isAsciiString(a));
+assertFalse(isOneByteString(a));
assertEquals(a.slice(1,-1), b);
assertTrue(/3456789qwe/.test(a));
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 5662ee4144..e9f58c6133 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -56,12 +56,10 @@
# TODO(turbofan): These are all covered by mjsunit as well. Enable them once
# we pass 'mjsunit' and 'webkit' with TurboFan.
'js1_4/Functions/function-001': [PASS, NO_VARIANTS],
- 'js1_5/Regress/regress-104077': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-396684': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-80981': [PASS, NO_VARIANTS],
# TODO(turbofan): Large switch statements crash.
- 'js1_5/Regress/regress-366601': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-398085-01': [PASS, NO_VARIANTS],
##################### SKIPPED TESTS #####################
diff --git a/deps/v8/test/perf-test/Collections/Collections.json b/deps/v8/test/perf-test/Collections/Collections.json
new file mode 100644
index 0000000000..bf735c0dcb
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/Collections.json
@@ -0,0 +1,15 @@
+{
+ "path": ["."],
+ "main": "run.js",
+ "flags": ["--harmony-collections"],
+ "run_count": 5,
+ "units": "score",
+ "results_regexp": "^%s\\-Collections\\(Score\\): (.+)$",
+ "total": true,
+ "tests": [
+ {"name": "Map"},
+ {"name": "Set"},
+ {"name": "WeakMap"},
+ {"name": "WeakSet"}
+ ]
+}
diff --git a/deps/v8/test/perf-test/Collections/base.js b/deps/v8/test/perf-test/Collections/base.js
new file mode 100644
index 0000000000..b0ce40b888
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/base.js
@@ -0,0 +1,367 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Performance.now is used in latency benchmarks, the fallback is Date.now.
+var performance = performance || {};
+performance.now = (function() {
+ return performance.now ||
+ performance.mozNow ||
+ performance.msNow ||
+ performance.oNow ||
+ performance.webkitNow ||
+ Date.now;
+})();
+
+// Simple framework for running the benchmark suites and
+// computing a score based on the timing measurements.
+
+
+// A benchmark has a name (string) and a function that will be run to
+// do the performance measurement. The optional setup and tearDown
+// arguments are functions that will be invoked before and after
+// running the benchmark, but the running time of these functions will
+// not be accounted for in the benchmark score.
+function Benchmark(name, doWarmup, doDeterministic, deterministicIterations,
+ run, setup, tearDown, rmsResult, minIterations) {
+ this.name = name;
+ this.doWarmup = doWarmup;
+ this.doDeterministic = doDeterministic;
+ this.deterministicIterations = deterministicIterations;
+ this.run = run;
+ this.Setup = setup ? setup : function() { };
+ this.TearDown = tearDown ? tearDown : function() { };
+ this.rmsResult = rmsResult ? rmsResult : null;
+ this.minIterations = minIterations ? minIterations : 32;
+}
+
+
+// Benchmark results hold the benchmark and the measured time used to
+// run the benchmark. The benchmark score is computed later once a
+// full benchmark suite has run to completion. If latency is set to 0
+// then there is no latency score for this benchmark.
+function BenchmarkResult(benchmark, time, latency) {
+ this.benchmark = benchmark;
+ this.time = time;
+ this.latency = latency;
+}
+
+
+// Automatically convert results to numbers. Used by the geometric
+// mean computation.
+BenchmarkResult.prototype.valueOf = function() {
+ return this.time;
+}
+
+
+// Suites of benchmarks consist of a name and the set of benchmarks in
+// addition to the reference timing that the final score will be based
+// on. This way, all scores are relative to a reference run and higher
+// scores implies better performance.
+function BenchmarkSuite(name, reference, benchmarks) {
+ this.name = name;
+ this.reference = reference;
+ this.benchmarks = benchmarks;
+ BenchmarkSuite.suites.push(this);
+}
+
+
+// Keep track of all declared benchmark suites.
+BenchmarkSuite.suites = [];
+
+// Scores are not comparable across versions. Bump the version if
+// you're making changes that will affect that scores, e.g. if you add
+// a new benchmark or change an existing one.
+BenchmarkSuite.version = '1';
+
+
+// Defines global benchsuite running mode that overrides benchmark suite
+// behavior. Intended to be set by the benchmark driver. Undefined
+// values here allow a benchmark to define behaviour itself.
+BenchmarkSuite.config = {
+ doWarmup: undefined,
+ doDeterministic: undefined
+};
+
+
+// Override the alert function to throw an exception instead.
+alert = function(s) {
+ throw "Alert called with argument: " + s;
+};
+
+
+// To make the benchmark results predictable, we replace Math.random
+// with a 100% deterministic alternative.
+BenchmarkSuite.ResetRNG = function() {
+ Math.random = (function() {
+ var seed = 49734321;
+ return function() {
+ // Robert Jenkins' 32 bit integer hash function.
+ seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
+ seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
+ seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
+ seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
+ seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
+ seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
+ return (seed & 0xfffffff) / 0x10000000;
+ };
+ })();
+}
+
+
+// Runs all registered benchmark suites and optionally yields between
+// each individual benchmark to avoid running for too long in the
+// context of browsers. Once done, the final score is reported to the
+// runner.
+BenchmarkSuite.RunSuites = function(runner, skipBenchmarks) {
+ skipBenchmarks = typeof skipBenchmarks === 'undefined' ? [] : skipBenchmarks;
+ var continuation = null;
+ var suites = BenchmarkSuite.suites;
+ var length = suites.length;
+ BenchmarkSuite.scores = [];
+ var index = 0;
+ function RunStep() {
+ while (continuation || index < length) {
+ if (continuation) {
+ continuation = continuation();
+ } else {
+ var suite = suites[index++];
+ if (runner.NotifyStart) runner.NotifyStart(suite.name);
+ if (skipBenchmarks.indexOf(suite.name) > -1) {
+ suite.NotifySkipped(runner);
+ } else {
+ continuation = suite.RunStep(runner);
+ }
+ }
+ if (continuation && typeof window != 'undefined' && window.setTimeout) {
+ window.setTimeout(RunStep, 25);
+ return;
+ }
+ }
+
+ // show final result
+ if (runner.NotifyScore) {
+ var score = BenchmarkSuite.GeometricMean(BenchmarkSuite.scores);
+ var formatted = BenchmarkSuite.FormatScore(100 * score);
+ runner.NotifyScore(formatted);
+ }
+ }
+ RunStep();
+}
+
+
+// Counts the total number of registered benchmarks. Useful for
+// showing progress as a percentage.
+BenchmarkSuite.CountBenchmarks = function() {
+ var result = 0;
+ var suites = BenchmarkSuite.suites;
+ for (var i = 0; i < suites.length; i++) {
+ result += suites[i].benchmarks.length;
+ }
+ return result;
+}
+
+
+// Computes the geometric mean of a set of numbers.
+BenchmarkSuite.GeometricMean = function(numbers) {
+ var log = 0;
+ for (var i = 0; i < numbers.length; i++) {
+ log += Math.log(numbers[i]);
+ }
+ return Math.pow(Math.E, log / numbers.length);
+}
+
+
+// Computes the geometric mean of a set of throughput time measurements.
+BenchmarkSuite.GeometricMeanTime = function(measurements) {
+ var log = 0;
+ for (var i = 0; i < measurements.length; i++) {
+ log += Math.log(measurements[i].time);
+ }
+ return Math.pow(Math.E, log / measurements.length);
+}
+
+
+// Computes the geometric mean of a set of rms measurements.
+BenchmarkSuite.GeometricMeanLatency = function(measurements) {
+ var log = 0;
+ var hasLatencyResult = false;
+ for (var i = 0; i < measurements.length; i++) {
+ if (measurements[i].latency != 0) {
+ log += Math.log(measurements[i].latency);
+ hasLatencyResult = true;
+ }
+ }
+ if (hasLatencyResult) {
+ return Math.pow(Math.E, log / measurements.length);
+ } else {
+ return 0;
+ }
+}
+
+
+// Converts a score value to a string with at least three significant
+// digits.
+BenchmarkSuite.FormatScore = function(value) {
+ if (value > 100) {
+ return value.toFixed(0);
+ } else {
+ return value.toPrecision(3);
+ }
+}
+
+// Notifies the runner that we're done running a single benchmark in
+// the benchmark suite. This can be useful to report progress.
+BenchmarkSuite.prototype.NotifyStep = function(result) {
+ this.results.push(result);
+ if (this.runner.NotifyStep) this.runner.NotifyStep(result.benchmark.name);
+}
+
+
+// Notifies the runner that we're done with running a suite and that
+// we have a result which can be reported to the user if needed.
+BenchmarkSuite.prototype.NotifyResult = function() {
+ var mean = BenchmarkSuite.GeometricMeanTime(this.results);
+ var score = this.reference[0] / mean;
+ BenchmarkSuite.scores.push(score);
+ if (this.runner.NotifyResult) {
+ var formatted = BenchmarkSuite.FormatScore(100 * score);
+ this.runner.NotifyResult(this.name, formatted);
+ }
+ if (this.reference.length == 2) {
+ var meanLatency = BenchmarkSuite.GeometricMeanLatency(this.results);
+ if (meanLatency != 0) {
+ var scoreLatency = this.reference[1] / meanLatency;
+ BenchmarkSuite.scores.push(scoreLatency);
+ if (this.runner.NotifyResult) {
+ var formattedLatency = BenchmarkSuite.FormatScore(100 * scoreLatency)
+ this.runner.NotifyResult(this.name + "Latency", formattedLatency);
+ }
+ }
+ }
+}
+
+
+BenchmarkSuite.prototype.NotifySkipped = function(runner) {
+ BenchmarkSuite.scores.push(1); // push default reference score.
+ if (runner.NotifyResult) {
+ runner.NotifyResult(this.name, "Skipped");
+ }
+}
+
+
+// Notifies the runner that running a benchmark resulted in an error.
+BenchmarkSuite.prototype.NotifyError = function(error) {
+ if (this.runner.NotifyError) {
+ this.runner.NotifyError(this.name, error);
+ }
+ if (this.runner.NotifyStep) {
+ this.runner.NotifyStep(this.name);
+ }
+}
+
+
+// Runs a single benchmark for at least a second and computes the
+// average time it takes to run a single iteration.
+BenchmarkSuite.prototype.RunSingleBenchmark = function(benchmark, data) {
+ var config = BenchmarkSuite.config;
+ var doWarmup = config.doWarmup !== undefined
+ ? config.doWarmup
+ : benchmark.doWarmup;
+ var doDeterministic = config.doDeterministic !== undefined
+ ? config.doDeterministic
+ : benchmark.doDeterministic;
+
+ function Measure(data) {
+ var elapsed = 0;
+ var start = new Date();
+
+ // Run either for 1 second or for the number of iterations specified
+ // by minIterations, depending on the config flag doDeterministic.
+ for (var i = 0; (doDeterministic ?
+ i<benchmark.deterministicIterations : elapsed < 1000); i++) {
+ benchmark.run();
+ elapsed = new Date() - start;
+ }
+ if (data != null) {
+ data.runs += i;
+ data.elapsed += elapsed;
+ }
+ }
+
+ // Sets up data in order to skip or not the warmup phase.
+ if (!doWarmup && data == null) {
+ data = { runs: 0, elapsed: 0 };
+ }
+
+ if (data == null) {
+ Measure(null);
+ return { runs: 0, elapsed: 0 };
+ } else {
+ Measure(data);
+ // If we've run too few iterations, we continue for another second.
+ if (data.runs < benchmark.minIterations) return data;
+ var usec = (data.elapsed * 1000) / data.runs;
+ var rms = (benchmark.rmsResult != null) ? benchmark.rmsResult() : 0;
+ this.NotifyStep(new BenchmarkResult(benchmark, usec, rms));
+ return null;
+ }
+}
+
+
+// This function starts running a suite, but stops between each
+// individual benchmark in the suite and returns a continuation
+// function which can be invoked to run the next benchmark. Once the
+// last benchmark has been executed, null is returned.
+BenchmarkSuite.prototype.RunStep = function(runner) {
+ BenchmarkSuite.ResetRNG();
+ this.results = [];
+ this.runner = runner;
+ var length = this.benchmarks.length;
+ var index = 0;
+ var suite = this;
+ var data;
+
+ // Run the setup, the actual benchmark, and the tear down in three
+ // separate steps to allow the framework to yield between any of the
+ // steps.
+
+ function RunNextSetup() {
+ if (index < length) {
+ try {
+ suite.benchmarks[index].Setup();
+ } catch (e) {
+ suite.NotifyError(e);
+ return null;
+ }
+ return RunNextBenchmark;
+ }
+ suite.NotifyResult();
+ return null;
+ }
+
+ function RunNextBenchmark() {
+ try {
+ data = suite.RunSingleBenchmark(suite.benchmarks[index], data);
+ } catch (e) {
+ suite.NotifyError(e);
+ return null;
+ }
+ // If data is null, we're done with this benchmark.
+ return (data == null) ? RunNextTearDown : RunNextBenchmark();
+ }
+
+ function RunNextTearDown() {
+ try {
+ suite.benchmarks[index++].TearDown();
+ } catch (e) {
+ suite.NotifyError(e);
+ return null;
+ }
+ return RunNextSetup;
+ }
+
+ // Start out running the setup.
+ return RunNextSetup();
+}
diff --git a/deps/v8/test/perf-test/Collections/map.js b/deps/v8/test/perf-test/Collections/map.js
new file mode 100644
index 0000000000..b310a71902
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/map.js
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var MapBenchmark = new BenchmarkSuite('Map', [1000], [
+ new Benchmark('Set', false, false, 0, MapSet),
+ new Benchmark('Has', false, false, 0, MapHas, MapSetup, MapTearDown),
+ new Benchmark('Get', false, false, 0, MapGet, MapSetup, MapTearDown),
+ new Benchmark('Delete', false, false, 0, MapDelete, MapSetup, MapTearDown),
+ new Benchmark('ForEach', false, false, 0, MapForEach, MapSetup, MapTearDown),
+]);
+
+
+var map;
+var N = 10;
+
+
+function MapSetup() {
+ map = new Map;
+ for (var i = 0; i < N; i++) {
+ map.set(i, i);
+ }
+}
+
+
+function MapTearDown() {
+ map = null;
+}
+
+
+function MapSet() {
+ MapSetup();
+ MapTearDown();
+}
+
+
+function MapHas() {
+ for (var i = 0; i < N; i++) {
+ if (!map.has(i)) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (map.has(i)) {
+ throw new Error();
+ }
+ }
+}
+
+
+function MapGet() {
+ for (var i = 0; i < N; i++) {
+ if (map.get(i) !== i) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (map.get(i) !== undefined) {
+ throw new Error();
+ }
+ }
+}
+
+
+function MapDelete() {
+ // This is run more than once per setup so we will end up deleting items
+ // more than once. Therefore, we do not the return value of delete.
+ for (var i = 0; i < N; i++) {
+ map.delete(i);
+ }
+}
+
+
+function MapForEach() {
+ map.forEach(function(v, k) {
+ if (v !== k) {
+ throw new Error();
+ }
+ });
+}
diff --git a/deps/v8/test/perf-test/Collections/run.js b/deps/v8/test/perf-test/Collections/run.js
new file mode 100644
index 0000000000..cfd1aef525
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/run.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('base.js');
+load('map.js');
+load('set.js');
+load('weakmap.js');
+load('weakset.js');
+
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-Collections(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/perf-test/Collections/set.js b/deps/v8/test/perf-test/Collections/set.js
new file mode 100644
index 0000000000..e6455e1c0a
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/set.js
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var SetBenchmark = new BenchmarkSuite('Set', [1000], [
+ new Benchmark('Add', false, false, 0, SetAdd),
+ new Benchmark('Has', false, false, 0, SetHas, SetSetup, SetTearDown),
+ new Benchmark('Delete', false, false, 0, SetDelete, SetSetup, SetTearDown),
+ new Benchmark('ForEach', false, false, 0, SetForEach, SetSetup, SetTearDown),
+]);
+
+
+var set;
+var N = 10;
+
+
+function SetSetup() {
+ set = new Set;
+ for (var i = 0; i < N; i++) {
+ set.add(i);
+ }
+}
+
+
+function SetTearDown() {
+ map = null;
+}
+
+
+function SetAdd() {
+ SetSetup();
+ SetTearDown();
+}
+
+
+function SetHas() {
+ for (var i = 0; i < N; i++) {
+ if (!set.has(i)) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (set.has(i)) {
+ throw new Error();
+ }
+ }
+}
+
+
+function SetDelete() {
+ // This is run more than once per setup so we will end up deleting items
+ // more than once. Therefore, we do not the return value of delete.
+ for (var i = 0; i < N; i++) {
+ set.delete(i);
+ }
+}
+
+
+function SetForEach() {
+ set.forEach(function(v, k) {
+ if (v !== k) {
+ throw new Error();
+ }
+ });
+}
diff --git a/deps/v8/test/perf-test/Collections/weakmap.js b/deps/v8/test/perf-test/Collections/weakmap.js
new file mode 100644
index 0000000000..8736dfd58b
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/weakmap.js
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var MapBenchmark = new BenchmarkSuite('WeakMap', [1000], [
+ new Benchmark('Set', false, false, 0, WeakMapSet),
+ new Benchmark('Has', false, false, 0, WeakMapHas, WeakMapSetup,
+ WeakMapTearDown),
+ new Benchmark('Get', false, false, 0, WeakMapGet, WeakMapSetup,
+ WeakMapTearDown),
+ new Benchmark('Delete', false, false, 0, WeakMapDelete, WeakMapSetup,
+ WeakMapTearDown),
+]);
+
+
+var wm;
+var N = 10;
+var keys = [];
+
+
+for (var i = 0; i < N * 2; i++) {
+ keys[i] = {};
+}
+
+
+function WeakMapSetup() {
+ wm = new WeakMap;
+ for (var i = 0; i < N; i++) {
+ wm.set(keys[i], i);
+ }
+}
+
+
+function WeakMapTearDown() {
+ wm = null;
+}
+
+
+function WeakMapSet() {
+ WeakMapSetup();
+ WeakMapTearDown();
+}
+
+
+function WeakMapHas() {
+ for (var i = 0; i < N; i++) {
+ if (!wm.has(keys[i])) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (wm.has(keys[i])) {
+ throw new Error();
+ }
+ }
+}
+
+
+function WeakMapGet() {
+ for (var i = 0; i < N; i++) {
+ if (wm.get(keys[i]) !== i) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (wm.get(keys[i]) !== undefined) {
+ throw new Error();
+ }
+ }
+}
+
+
+function WeakMapDelete() {
+ // This is run more than once per setup so we will end up deleting items
+ // more than once. Therefore, we do not the return value of delete.
+ for (var i = 0; i < N; i++) {
+ wm.delete(keys[i]);
+ }
+}
diff --git a/deps/v8/test/perf-test/Collections/weakset.js b/deps/v8/test/perf-test/Collections/weakset.js
new file mode 100644
index 0000000000..a7d0f3d076
--- /dev/null
+++ b/deps/v8/test/perf-test/Collections/weakset.js
@@ -0,0 +1,64 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var SetBenchmark = new BenchmarkSuite('WeakSet', [1000], [
+ new Benchmark('Add', false, false, 0, WeakSetAdd),
+ new Benchmark('Has', false, false, 0, WeakSetHas, WeakSetSetup,
+ WeakSetTearDown),
+ new Benchmark('Delete', false, false, 0, WeakSetDelete, WeakSetSetup,
+ WeakSetTearDown),
+]);
+
+
+var ws;
+var N = 10;
+var keys = [];
+
+
+for (var i = 0; i < N * 2; i++) {
+ keys[i] = {};
+}
+
+
+function WeakSetSetup() {
+ ws = new WeakSet;
+ for (var i = 0; i < N; i++) {
+ ws.add(keys[i]);
+ }
+}
+
+
+function WeakSetTearDown() {
+ ws = null;
+}
+
+
+function WeakSetAdd() {
+ WeakSetSetup();
+ WeakSetTearDown();
+}
+
+
+function WeakSetHas() {
+ for (var i = 0; i < N; i++) {
+ if (!ws.has(keys[i])) {
+ throw new Error();
+ }
+ }
+ for (var i = N; i < 2 * N; i++) {
+ if (ws.has(keys[i])) {
+ throw new Error();
+ }
+ }
+}
+
+
+function WeakSetDelete() {
+ // This is run more than once per setup so we will end up deleting items
+ // more than once. Therefore, we do not the return value of delete.
+ for (var i = 0; i < N; i++) {
+ ws.delete(keys[i]);
+ }
+}
diff --git a/deps/v8/test/preparser/duplicate-property.pyt b/deps/v8/test/preparser/duplicate-property.pyt
deleted file mode 100644
index 594b4786cb..0000000000
--- a/deps/v8/test/preparser/duplicate-property.pyt
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Tests of duplicate properties in object literals.
-
-# ----------------------------------------------------------------------
-# Utility functions to generate a number of tests for each property
-# name pair.
-
-def PropertyTest(name, propa, propb, allow_strict = True):
- replacement = {"id1": propa, "id2": propb, "name": name}
-
- # Tests same test in both strict and non-strict context.
- def StrictTest(name, source, replacement, expectation):
- if (allow_strict):
- Template("strict-" + name,
- "\"use strict\";\n" + source)(replacement, expectation)
- Template(name, source)(replacement, expectation)
-
- # This one only fails in non-strict context.
- if (allow_strict):
- Template("strict-$name-data-data", """
- "use strict";
- var o = {$id1: 42, $id2: 42};
- """)(replacement, "strict_duplicate_property")
-
- Template("$name-data-data", """
- var o = {$id1: 42, $id2: 42};
- """)(replacement, None)
-
- StrictTest("$name-data-get", """
- var o = {$id1: 42, get $id2(){}};
- """, replacement, "accessor_data_property")
-
- StrictTest("$name-data-set", """
- var o = {$id1: 42, set $id2(v){}};
- """, replacement, "accessor_data_property")
-
- StrictTest("$name-get-data", """
- var o = {get $id1(){}, $id2: 42};
- """, replacement, "accessor_data_property")
-
- StrictTest("$name-set-data", """
- var o = {set $id1(v){}, $id2: 42};
- """, replacement, "accessor_data_property")
-
- StrictTest("$name-get-get", """
- var o = {get $id1(){}, get $id2(){}};
- """, replacement, "accessor_get_set")
-
- StrictTest("$name-set-set", """
- var o = {set $id1(v){}, set $id2(v){}};
- """, replacement, "accessor_get_set")
-
- StrictTest("$name-nested-get", """
- var o = {get $id1(){}, o: {get $id2(){} } };
- """, replacement, None)
-
- StrictTest("$name-nested-set", """
- var o = {set $id1(v){}, o: {set $id2(v){} } };
- """, replacement, None)
-
-
-def TestBothWays(name, propa, propb, allow_strict = True):
- PropertyTest(name + "-1", propa, propb, allow_strict)
- PropertyTest(name + "-2", propb, propa, allow_strict)
-
-def TestSame(name, prop, allow_strict = True):
- PropertyTest(name, prop, prop, allow_strict)
-
-#-----------------------------------------------------------------------
-
-# Simple identifier property
-TestSame("a", "a")
-
-# Get/set identifiers
-TestSame("get-id", "get")
-TestSame("set-id", "set")
-
-# Number properties
-TestSame("0", "0")
-TestSame("0.1", "0.1")
-TestSame("1.0", "1.0")
-TestSame("42.33", "42.33")
-TestSame("2^32-2", "4294967294")
-TestSame("2^32", "4294967296")
-TestSame("2^53", "9007199254740992")
-TestSame("Hex20", "0x20")
-TestSame("exp10", "1e10")
-TestSame("exp20", "1e20")
-TestSame("Oct40", "040", False);
-
-
-# String properties
-TestSame("str-a", '"a"')
-TestSame("str-0", '"0"')
-TestSame("str-42", '"42"')
-TestSame("str-empty", '""')
-
-# Keywords
-TestSame("if", "if")
-TestSame("case", "case")
-
-# Future reserved keywords
-TestSame("public", "public")
-TestSame("class", "class")
-
-
-# Test that numbers are converted to string correctly.
-
-TestBothWays("hex-int", "0x20", "32")
-TestBothWays("oct-int", "040", "32", False) # Octals disallowed in strict mode.
-TestBothWays("dec-int", "32.00", "32")
-TestBothWays("dec-underflow-int",
- "32.00000000000000000000000000000000000000001", "32")
-TestBothWays("exp-int", "3.2e1", "32")
-TestBothWays("exp-int", "3200e-2", "32")
-TestBothWays("overflow-inf", "1e2000", "Infinity")
-TestBothWays("overflow-inf-exact", "1.797693134862315808e+308", "Infinity")
-TestBothWays("non-overflow-inf-exact", "1.797693134862315807e+308",
- "1.7976931348623157e+308")
-TestBothWays("underflow-0", "1e-2000", "0")
-TestBothWays("underflow-0-exact", "2.4703282292062E-324", "0")
-TestBothWays("non-underflow-0-exact", "2.4703282292063E-324", "5e-324")
-TestBothWays("precission-loss-high", "9007199254740992", "9007199254740993")
-TestBothWays("precission-loss-low", "1.9999999999999998", "1.9999999999999997")
-TestBothWays("non-canonical-literal-int", "1.0", "1")
-TestBothWays("non-canonical-literal-frac", "1.50", "1.5")
-TestBothWays("rounding-down", "1.12512512512512452", "1.1251251251251244")
-TestBothWays("rounding-up", "1.12512512512512453", "1.1251251251251246")
-
-TestBothWays("hex-int-str", "0x20", '"32"')
-TestBothWays("dec-int-str", "32.00", '"32"')
-TestBothWays("exp-int-str", "3.2e1", '"32"')
-TestBothWays("overflow-inf-str", "1e2000", '"Infinity"')
-TestBothWays("underflow-0-str", "1e-2000", '"0"')
-TestBothWays("non-canonical-literal-int-str", "1.0", '"1"')
-TestBothWays("non-canonical-literal-frac-str", "1.50", '"1.5"')
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index babf35d5d8..9d69988f71 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -25,10 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# We don't parse RegExps at scanning time, so we can't fail on octal
-# escapes (we need to parse to distinguish octal escapes from valid
-# back-references).
[
[ALWAYS, {
# TODO(mstarzinger): This script parses but throws a TypeError when run.
diff --git a/deps/v8/test/promises-aplus/promises-aplus.status b/deps/v8/test/promises-aplus/promises-aplus.status
index fdcf40b13f..5da9efae90 100644
--- a/deps/v8/test/promises-aplus/promises-aplus.status
+++ b/deps/v8/test/promises-aplus/promises-aplus.status
@@ -25,7 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
[
[ALWAYS, {
}], # ALWAYS
diff --git a/deps/v8/test/test262-es6/README b/deps/v8/test/test262-es6/README
new file mode 100644
index 0000000000..d0b3b42ef9
--- /dev/null
+++ b/deps/v8/test/test262-es6/README
@@ -0,0 +1,18 @@
+This directory contains code for binding the test262 test suite
+into the v8 test harness. To use the tests check out the test262
+tests from
+
+ https://github.com/tc39/test262
+
+at hash 9bd6686 (2014/08/25 revision) as 'data' in this directory. Using a later
+version may be possible but the tests are only known to pass (and indeed run)
+with that revision.
+
+ git clone https://github.com/tc39/test262 data
+ cd data
+ git checkout 9bd6686
+
+If you do update to a newer revision you may have to change the test
+harness adapter code since it uses internal functionality from the
+harness that comes bundled with the tests. You will most likely also
+have to update the test expectation file.
diff --git a/deps/v8/test/test262-es6/harness-adapt.js b/deps/v8/test/test262-es6/harness-adapt.js
new file mode 100644
index 0000000000..60c0858f02
--- /dev/null
+++ b/deps/v8/test/test262-es6/harness-adapt.js
@@ -0,0 +1,91 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function fnGlobalObject() { return (function() { return this; })(); }
+
+var ES5Harness = (function() {
+ var currentTest = {};
+ var $this = this;
+
+ function Test262Error(id, path, description, codeString,
+ preconditionString, result, error) {
+ this.id = id;
+ this.path = path;
+ this.description = description;
+ this.result = result;
+ this.error = error;
+ this.code = codeString;
+ this.pre = preconditionString;
+ }
+
+ Test262Error.prototype.toString = function() {
+ return this.result + " " + this.error;
+ }
+
+ function registerTest(test) {
+ if (!(test.precondition && !test.precondition())) {
+ var error;
+ try {
+ var res = test.test.call($this);
+ } catch(e) {
+ res = 'fail';
+ error = e;
+ }
+ var retVal = /^s/i.test(test.id)
+ ? (res === true || typeof res == 'undefined' ? 'pass' : 'fail')
+ : (res === true ? 'pass' : 'fail');
+
+ if (retVal != 'pass') {
+ var precondition = (test.precondition !== undefined)
+ ? test.precondition.toString()
+ : '';
+
+ throw new Test262Error(
+ test.id,
+ test.path,
+ test.description,
+ test.test.toString(),
+ precondition,
+ retVal,
+ error);
+ }
+ }
+ }
+
+ return {
+ registerTest: registerTest
+ }
+})();
+
+function $DONE(arg){
+ if (arg) {
+ print('FAILED! Error: ' + arg);
+ quit(1);
+ }
+
+ quit(0);
+};
diff --git a/deps/v8/test/test262-es6/test262-es6.status b/deps/v8/test/test262-es6/test262-es6.status
new file mode 100644
index 0000000000..c4c94f3bf0
--- /dev/null
+++ b/deps/v8/test/test262-es6/test262-es6.status
@@ -0,0 +1,166 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+[
+[ALWAYS, {
+ ############################### BUGS ###################################
+
+ '15.5.4.9_CE': [['no_i18n', SKIP]],
+
+ # BUG(v8:3455)
+ '11.2.3_b': [FAIL],
+ '12.2.3_b': [FAIL],
+
+ ###################### NEEDS INVESTIGATION #######################
+
+ # Possibly same cause as S8.5_A2.1, below: floating-point tests.
+ 'S15.8.2.16_A7': [PASS, FAIL_OK],
+ 'S15.8.2.18_A7': [PASS, FAIL_OK],
+ 'S15.8.2.7_A7': [PASS, FAIL_OK],
+
+ # This is an incompatibility between ES5 and V8 on enumerating
+ # shadowed elements in a for..in loop.
+ # https://code.google.com/p/v8/issues/detail?id=705
+ '12.6.4-2': [PASS, FAIL_OK],
+
+ ###################### MISSING ES6 FEATURES #######################
+
+ # Array.from
+ 'S22.1.2.1_T1': [FAIL],
+ 'S22.1.2.1_T2': [FAIL],
+
+ # Direct proxies
+ 'Array.prototype.find_callable-predicate': [FAIL],
+
+ ######################## OBSOLETED BY ES6 ###########################
+
+ # ES6 allows duplicate properties
+ # TODO(arv): Reactivate when check removal has relanded.
+ # '11.1.5-4-4-a-1-s': [FAIL],
+ # '11.1.5_4-4-b-1': [FAIL],
+ # '11.1.5_4-4-b-2': [FAIL],
+ # '11.1.5_4-4-c-1': [FAIL],
+ # '11.1.5_4-4-c-2': [FAIL],
+ # '11.1.5_4-4-d-1': [FAIL],
+ # '11.1.5_4-4-d-2': [FAIL],
+ # '11.1.5_4-4-d-3': [FAIL],
+ # '11.1.5_4-4-d-4': [FAIL],
+
+ # ES6 allows block-local functions.
+ 'Sbp_A1_T1': [FAIL],
+ 'Sbp_A2_T1': [FAIL],
+ 'Sbp_A2_T2': [FAIL],
+ 'Sbp_A3_T1': [FAIL],
+ 'Sbp_A3_T2': [FAIL],
+ 'Sbp_A4_T1': [FAIL],
+ 'Sbp_A4_T2': [FAIL],
+ 'Sbp_A5_T1': [PASS], # Test is broken (strict reference to unbound variable)
+ 'Sbp_A5_T2': [FAIL],
+
+ ######################## NEEDS INVESTIGATION ###########################
+
+ # These test failures are specific to the intl402 suite and need investigation
+ # to be either marked as bugs with issues filed for them or as deliberate
+ # incompatibilities if the test cases turn out to be broken or ambiguous.
+ '6.2.3': [FAIL],
+ '9.2.1_2': [FAIL],
+ '9.2.6_2': [FAIL],
+ '10.1.1_a': [FAIL],
+ '10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
+ '10.1.2.1_4': [FAIL],
+ '10.2.3_b': [PASS, FAIL],
+ '10.3_a': [FAIL],
+ '11.1.1_17': [PASS, FAIL],
+ '11.1.1_19': [PASS, FAIL],
+ '11.1.1_20_c': [FAIL],
+ '11.1.1_a': [FAIL],
+ '11.1.2.1_4': [FAIL],
+ '11.3.2_FN_2': [PASS, FAIL],
+ '11.3.2_TRF': [PASS, FAIL],
+ '11.3_a': [FAIL],
+ '12.1.1_a': [FAIL],
+ '12.1.2.1_4': [FAIL],
+ '12.3.2_FDT_7_a_iv': [FAIL],
+ '12.3.3': [FAIL],
+ '12.3_a': [FAIL],
+ '15.5.4.9_3': [PASS, FAIL],
+
+ ##################### DELIBERATE INCOMPATIBILITIES #####################
+
+ 'S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
+
+ # Linux for ia32 (and therefore simulators) default to extended 80 bit
+ # floating point formats, so these tests checking 64-bit FP precision fail.
+ # The other platforms/arch's pass these tests.
+ # We follow the other major JS engines by keeping this default.
+ 'S8.5_A2.1': [PASS, FAIL_OK],
+ 'S8.5_A2.2': [PASS, FAIL_OK],
+
+ ############################ INVALID TESTS #############################
+
+ # The reference value calculated by Test262 is incorrect if you run these
+ # tests in PST/PDT between first Sunday in March and first Sunday in April.
+ # The DST switch was moved in 2007 whereas Test262 bases the reference value
+ # on 2000. Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
+ 'S15.9.3.1_A5_T1': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T2': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T3': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T4': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T5': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T6': [PASS, FAIL_OK],
+
+ # Test makes unjustified assumptions about the number of calls to SortCompare.
+ # Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
+ 'bug_596_1': [PASS, FAIL_OK],
+
+ ############################ SKIPPED TESTS #############################
+
+ # These tests take a looong time to run in debug mode.
+ 'S15.1.3.1_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+ 'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+}], # ALWAYS
+
+['system == macos', {
+ '11.3.2_TRP': [FAIL],
+ '9.2.5_11_g_ii_2': [FAIL],
+}], # system == macos
+
+['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
+
+ # TODO(mstarzinger): Causes stack overflow on simulators due to eager
+ # compilation of parenthesized function literals. Needs investigation.
+ 'S13.2.1_A1_T1': [SKIP],
+
+ # BUG(3251225): Tests that timeout with --nocrankshaft.
+ 'S15.1.3.1_A2.4_T1': [SKIP],
+ 'S15.1.3.1_A2.5_T1': [SKIP],
+ 'S15.1.3.2_A2.4_T1': [SKIP],
+ 'S15.1.3.2_A2.5_T1': [SKIP],
+ 'S15.1.3.3_A2.3_T1': [SKIP],
+ 'S15.1.3.4_A2.3_T1': [SKIP],
+}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el'
+]
diff --git a/deps/v8/test/test262-es6/testcfg.py b/deps/v8/test/test262-es6/testcfg.py
new file mode 100644
index 0000000000..59eda32b7b
--- /dev/null
+++ b/deps/v8/test/test262-es6/testcfg.py
@@ -0,0 +1,164 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import hashlib
+import os
+import shutil
+import sys
+import tarfile
+import imp
+
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.objects import testcase
+
+TEST_262_ARCHIVE_REVISION = "9bd6686" # This is the 2014-08-25 revision.
+TEST_262_ARCHIVE_MD5 = "0f5928b391864890d5a397f8cdc82705"
+TEST_262_URL = "https://github.com/tc39/test262/tarball/%s"
+TEST_262_HARNESS_FILES = ["sta.js"]
+
+TEST_262_SUITE_PATH = ["data", "test", "suite"]
+TEST_262_HARNESS_PATH = ["data", "test", "harness"]
+TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
+
+class Test262TestSuite(testsuite.TestSuite):
+
+ def __init__(self, name, root):
+ super(Test262TestSuite, self).__init__(name, root)
+ self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
+ self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
+ self.harness = [os.path.join(self.harnesspath, f)
+ for f in TEST_262_HARNESS_FILES]
+ self.harness += [os.path.join(self.root, "harness-adapt.js")]
+ self.ParseTestRecord = None
+
+ def CommonTestName(self, testcase):
+ return testcase.path.split(os.path.sep)[-1]
+
+ def ListTests(self, context):
+ tests = []
+ for dirname, dirs, files in os.walk(self.testroot):
+ for dotted in [x for x in dirs if x.startswith(".")]:
+ dirs.remove(dotted)
+ if context.noi18n and "intl402" in dirs:
+ dirs.remove("intl402")
+ dirs.sort()
+ files.sort()
+ for filename in files:
+ if filename.endswith(".js"):
+ testname = os.path.join(dirname[len(self.testroot) + 1:],
+ filename[:-3])
+ case = testcase.TestCase(self, testname)
+ tests.append(case)
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ return (testcase.flags + context.mode_flags + self.harness +
+ self.GetIncludesForTest(testcase) + ["--harmony"] +
+ [os.path.join(self.testroot, testcase.path + ".js")])
+
+ def LoadParseTestRecord(self):
+ if not self.ParseTestRecord:
+ root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
+ f = None
+ try:
+ (f, pathname, description) = imp.find_module("parseTestRecord", [root])
+ module = imp.load_module("parseTestRecord", f, pathname, description)
+ self.ParseTestRecord = module.parseTestRecord
+ except:
+ raise ImportError("Cannot load parseTestRecord; you may need to "
+ "--download-data for test262")
+ finally:
+ if f:
+ f.close()
+ return self.ParseTestRecord
+
+ def GetTestRecord(self, testcase):
+ if not hasattr(testcase, "test_record"):
+ ParseTestRecord = self.LoadParseTestRecord()
+ testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
+ testcase.path)
+ return testcase.test_record
+
+ def GetIncludesForTest(self, testcase):
+ test_record = self.GetTestRecord(testcase)
+ if "includes" in test_record:
+ includes = [os.path.join(self.harnesspath, f)
+ for f in test_record["includes"]]
+ else:
+ includes = []
+ return includes
+
+ def GetSourceForTest(self, testcase):
+ filename = os.path.join(self.testroot, testcase.path + ".js")
+ with open(filename) as f:
+ return f.read()
+
+ def IsNegativeTest(self, testcase):
+ test_record = self.GetTestRecord(testcase)
+ return "negative" in test_record
+
+ def IsFailureOutput(self, output, testpath):
+ if output.exit_code != 0:
+ return True
+ return "FAILED!" in output.stdout
+
+ def DownloadData(self):
+ revision = TEST_262_ARCHIVE_REVISION
+ archive_url = TEST_262_URL % revision
+ archive_name = os.path.join(self.root, "tc39-test262-%s.tar.gz" % revision)
+ directory_name = os.path.join(self.root, "data")
+ directory_old_name = os.path.join(self.root, "data.old")
+ if not os.path.exists(archive_name):
+ print "Downloading test data from %s ..." % archive_url
+ utils.URLRetrieve(archive_url, archive_name)
+ if os.path.exists(directory_name):
+ if os.path.exists(directory_old_name):
+ shutil.rmtree(directory_old_name)
+ os.rename(directory_name, directory_old_name)
+ if not os.path.exists(directory_name):
+ print "Extracting test262-%s.tar.gz ..." % revision
+ md5 = hashlib.md5()
+ with open(archive_name, "rb") as f:
+ for chunk in iter(lambda: f.read(8192), ""):
+ md5.update(chunk)
+ if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
+ os.remove(archive_name)
+ raise Exception("Hash mismatch of test data file")
+ archive = tarfile.open(archive_name, "r:gz")
+ if sys.platform in ("win32", "cygwin"):
+ # Magic incantation to allow longer path names on Windows.
+ archive.extractall(u"\\\\?\\%s" % self.root)
+ else:
+ archive.extractall(self.root)
+ os.rename(os.path.join(self.root, "tc39-test262-%s" % revision),
+ directory_name)
+
+
+def GetSuite(name, root):
+ return Test262TestSuite(name, root)
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index dd075d9688..86663130ac 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -31,9 +31,6 @@
'15.5.4.9_CE': [['no_i18n', SKIP]],
- # TODO(turbofan): Timeouts on TurboFan need investigation.
- '10.1.1_13': [PASS, NO_VARIANTS],
-
# BUG(v8:3455)
'11.2.3_b': [FAIL],
'12.2.3_b': [FAIL],
diff --git a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
index 4b8eb14775..4fe6742d00 100644
--- a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
+++ b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
@@ -77,7 +77,7 @@ PASS getSortedOwnPropertyNames(Number) is ['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_V
PASS getSortedOwnPropertyNames(Number.prototype) is ['constructor', 'toExponential', 'toFixed', 'toLocaleString', 'toPrecision', 'toString', 'valueOf']
PASS getSortedOwnPropertyNames(Date) is ['UTC', 'arguments', 'caller', 'length', 'name', 'now', 'parse', 'prototype']
PASS getSortedOwnPropertyNames(Date.prototype) is ['constructor', 'getDate', 'getDay', 'getFullYear', 'getHours', 'getMilliseconds', 'getMinutes', 'getMonth', 'getSeconds', 'getTime', 'getTimezoneOffset', 'getUTCDate', 'getUTCDay', 'getUTCFullYear', 'getUTCHours', 'getUTCMilliseconds', 'getUTCMinutes', 'getUTCMonth', 'getUTCSeconds', 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', 'setYear', 'toDateString', 'toGMTString', 'toISOString', 'toJSON', 'toLocaleDateString', 'toLocaleString', 'toLocaleTimeString', 'toString', 'toTimeString', 'toUTCString', 'valueOf']
-FAIL getSortedOwnPropertyNames(RegExp) should be $&,$',$*,$+,$1,$2,$3,$4,$5,$6,$7,$8,$9,$_,$`,arguments,caller,input,lastMatch,lastParen,leftContext,length,multiline,name,prototype,rightContext. Was $&,$',$*,$+,$1,$2,$3,$4,$5,$6,$7,$8,$9,$_,$`,$input,arguments,caller,input,lastMatch,lastParen,leftContext,length,multiline,name,prototype,rightContext.
+PASS getSortedOwnPropertyNames(RegExp) is ['$&', "$'", '$*', '$+', '$1', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$_', '$`', 'arguments', 'caller', 'input', 'lastMatch', 'lastParen', 'leftContext', 'length', 'multiline', 'name', 'prototype', 'rightContext']
PASS getSortedOwnPropertyNames(RegExp.prototype) is ['compile', 'constructor', 'exec', 'global', 'ignoreCase', 'lastIndex', 'multiline', 'source', 'test', 'toString']
PASS getSortedOwnPropertyNames(Error) is ['arguments', 'caller', 'captureStackTrace', 'length', 'name', 'prototype', 'stackTraceLimit']
PASS getSortedOwnPropertyNames(Error.prototype) is ['constructor', 'message', 'name', 'toString']
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index c14d5c13c4..3bb6574dde 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -33,8 +33,6 @@
'dfg-inline-arguments-become-int32': [PASS, FAIL],
'dfg-inline-arguments-reset': [PASS, FAIL],
'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
- # TODO(turbofan): Sometimes the try-catch blacklist fails.
- 'exception-with-handler-inside-eval-with-dynamic-scope': [PASS, NO_VARIANTS],
# TODO(turbofan): We run out of stack earlier on 64-bit for now.
'fast/js/deep-recursion-test': [PASS, NO_VARIANTS],
# TODO(bmeurer,svenpanne): Investigate test failure.
diff --git a/deps/v8/testing/gmock-support.h b/deps/v8/testing/gmock-support.h
new file mode 100644
index 0000000000..44348b60e0
--- /dev/null
+++ b/deps/v8/testing/gmock-support.h
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TESTING_GMOCK_SUPPORT_H_
+#define V8_TESTING_GMOCK_SUPPORT_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace testing {
+
+template <typename T>
+class Capture {
+ public:
+ Capture() : value_(), has_value_(false) {}
+
+ const T& value() const { return value_; }
+ bool has_value() const { return has_value_; }
+
+ void SetValue(const T& value) {
+ DCHECK(!has_value());
+ value_ = value;
+ has_value_ = true;
+ }
+
+ private:
+ T value_;
+ bool has_value_;
+};
+
+
+namespace internal {
+
+template <typename T>
+class CaptureEqMatcher : public MatcherInterface<T> {
+ public:
+ explicit CaptureEqMatcher(Capture<T>* capture) : capture_(capture) {}
+
+ virtual void DescribeTo(std::ostream* os) const {
+ *os << "captured by " << static_cast<const void*>(capture_);
+ if (capture_->has_value()) *os << " which has value " << capture_->value();
+ }
+
+ virtual bool MatchAndExplain(T value, MatchResultListener* listener) const {
+ if (!capture_->has_value()) {
+ capture_->SetValue(value);
+ return true;
+ }
+ if (value != capture_->value()) {
+ *listener << "which is not equal to " << capture_->value();
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ Capture<T>* capture_;
+};
+
+} // namespace internal
+
+
+// CaptureEq(capture) captures the value passed in during matching as long as it
+// is unset, and once set, compares the value for equality with the argument.
+template <typename T>
+Matcher<T> CaptureEq(Capture<T>* capture) {
+ return MakeMatcher(new internal::CaptureEqMatcher<T>(capture));
+}
+
+} // namespace testing
+
+#endif // V8_TESTING_GMOCK_SUPPORT_H_
diff --git a/deps/v8/testing/gmock.gyp b/deps/v8/testing/gmock.gyp
index a36584dcf7..ba4386141a 100644
--- a/deps/v8/testing/gmock.gyp
+++ b/deps/v8/testing/gmock.gyp
@@ -30,7 +30,7 @@
'gmock/src/gmock-matchers.cc',
'gmock/src/gmock-spec-builders.cc',
'gmock/src/gmock.cc',
- 'gmock_mutant.h', # gMock helpers
+ 'gmock-support.h', # gMock helpers
],
'sources!': [
'gmock/src/gmock-all.cc', # Not needed by our build.
@@ -47,6 +47,13 @@
'export_dependent_settings': [
'gtest.gyp:gtest',
],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
},
{
'target_name': 'gmock_main',
diff --git a/deps/v8/testing/gtest-support.h b/deps/v8/testing/gtest-support.h
new file mode 100644
index 0000000000..66b1094ff4
--- /dev/null
+++ b/deps/v8/testing/gtest-support.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TESTING_GTEST_SUPPORT_H_
+#define V8_TESTING_GTEST_SUPPORT_H_
+
+#include "include/v8stdint.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace testing {
+namespace internal {
+
+#define GET_TYPE_NAME(type) \
+ template <> \
+ inline std::string GetTypeName<type>() { \
+ return #type; \
+ }
+GET_TYPE_NAME(int8_t)
+GET_TYPE_NAME(uint8_t)
+GET_TYPE_NAME(int16_t)
+GET_TYPE_NAME(uint16_t)
+GET_TYPE_NAME(int32_t)
+GET_TYPE_NAME(uint32_t)
+GET_TYPE_NAME(int64_t)
+GET_TYPE_NAME(uint64_t)
+GET_TYPE_NAME(float)
+GET_TYPE_NAME(double)
+#undef GET_TYPE_NAME
+
+
+// TRACED_FOREACH(type, var, array) expands to a loop that assigns |var| every
+// item in the |array| and adds a SCOPED_TRACE() message for the |var| while
+// inside the loop body.
+// TODO(bmeurer): Migrate to C++11 once we're ready.
+#define TRACED_FOREACH(_type, _var, _array) \
+ for (size_t _i = 0; _i < arraysize(_array); ++_i) \
+ for (bool _done = false; !_done;) \
+ for (_type const _var = _array[_i]; !_done;) \
+ for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
+ !_done; _done = true)
+
+
+// TRACED_FORRANGE(type, var, low, high) expands to a loop that assigns |var|
+// every value in the range |low| to (including) |high| and adds a
+// SCOPED_TRACE() message for the |var| while inside the loop body.
+// TODO(bmeurer): Migrate to C++11 once we're ready.
+#define TRACED_FORRANGE(_type, _var, _low, _high) \
+ for (_type _i = _low; _i <= _high; ++_i) \
+ for (bool _done = false; !_done;) \
+ for (_type const _var = _i; !_done;) \
+ for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
+ !_done; _done = true)
+
+} // namespace internal
+} // namespace testing
+
+#endif // V8_TESTING_GTEST_SUPPORT_H_
diff --git a/deps/v8/testing/gtest-type-names.h b/deps/v8/testing/gtest-type-names.h
deleted file mode 100644
index ba900ddb88..0000000000
--- a/deps/v8/testing/gtest-type-names.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TESTING_GTEST_TYPE_NAMES_H_
-#define V8_TESTING_GTEST_TYPE_NAMES_H_
-
-#include "include/v8stdint.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace testing {
-namespace internal {
-
-#define GET_TYPE_NAME(type) \
- template <> \
- std::string GetTypeName<type>() { \
- return #type; \
- }
-GET_TYPE_NAME(int8_t)
-GET_TYPE_NAME(uint8_t)
-GET_TYPE_NAME(int16_t)
-GET_TYPE_NAME(uint16_t)
-GET_TYPE_NAME(int32_t)
-GET_TYPE_NAME(uint32_t)
-GET_TYPE_NAME(int64_t)
-GET_TYPE_NAME(uint64_t)
-GET_TYPE_NAME(float)
-GET_TYPE_NAME(double)
-#undef GET_TYPE_NAME
-
-} // namespace internal
-} // namespace testing
-
-#endif // V8_TESTING_GTEST_TYPE_NAMES_H_
diff --git a/deps/v8/testing/gtest.gyp b/deps/v8/testing/gtest.gyp
index 5d068d0257..d7662101cf 100644
--- a/deps/v8/testing/gtest.gyp
+++ b/deps/v8/testing/gtest.gyp
@@ -37,7 +37,7 @@
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
- 'gtest-type-names.h',
+ 'gtest-support.h',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
diff --git a/deps/v8/third_party/fdlibm/LICENSE b/deps/v8/third_party/fdlibm/LICENSE
index b0247953f8..b54cb52278 100644
--- a/deps/v8/third_party/fdlibm/LICENSE
+++ b/deps/v8/third_party/fdlibm/LICENSE
@@ -1,4 +1,4 @@
-Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+Copyright (C) 1993-2004 by Sun Microsystems, Inc. All rights reserved.
Developed at SunSoft, a Sun Microsystems, Inc. business.
Permission to use, copy, modify, and distribute this
diff --git a/deps/v8/third_party/fdlibm/fdlibm.cc b/deps/v8/third_party/fdlibm/fdlibm.cc
index 2f6eab17e8..c009cd0f6f 100644
--- a/deps/v8/third_party/fdlibm/fdlibm.cc
+++ b/deps/v8/third_party/fdlibm/fdlibm.cc
@@ -34,19 +34,19 @@ const double MathConstants::constants[] = {
2.02226624879595063154e-21, // pio2_2t 4
2.02226624871116645580e-21, // pio2_3 5
8.47842766036889956997e-32, // pio2_3t 6
- -1.66666666666666324348e-01, // S1 7
+ -1.66666666666666324348e-01, // S1 7 coefficients for sin
8.33333333332248946124e-03, // 8
-1.98412698298579493134e-04, // 9
2.75573137070700676789e-06, // 10
-2.50507602534068634195e-08, // 11
1.58969099521155010221e-10, // S6 12
- 4.16666666666666019037e-02, // C1 13
+ 4.16666666666666019037e-02, // C1 13 coefficients for cos
-1.38888888888741095749e-03, // 14
2.48015872894767294178e-05, // 15
-2.75573143513906633035e-07, // 16
2.08757232129817482790e-09, // 17
-1.13596475577881948265e-11, // C6 18
- 3.33333333333334091986e-01, // T0 19
+ 3.33333333333334091986e-01, // T0 19 coefficients for tan
1.33333333333201242699e-01, // 20
5.39682539762260521377e-02, // 21
2.18694882948595424599e-02, // 22
@@ -65,13 +65,21 @@ const double MathConstants::constants[] = {
1.90821492927058770002e-10, // ln2_lo 35
1.80143985094819840000e+16, // 2^54 36
6.666666666666666666e-01, // 2/3 37
- 6.666666666666735130e-01, // LP1 38
+ 6.666666666666735130e-01, // LP1 38 coefficients for log1p
3.999999999940941908e-01, // 39
2.857142874366239149e-01, // 40
2.222219843214978396e-01, // 41
1.818357216161805012e-01, // 42
1.531383769920937332e-01, // 43
1.479819860511658591e-01, // LP7 44
+ 7.09782712893383973096e+02, // 45 overflow threshold for expm1
+ 1.44269504088896338700e+00, // 1/ln2 46
+ -3.33333333333331316428e-02, // Q1 47 coefficients for expm1
+ 1.58730158725481460165e-03, // 48
+ -7.93650757867487942473e-05, // 49
+ 4.00821782732936239552e-06, // 50
+ -2.01099218183624371326e-07, // Q5 51
+ 710.4758600739439 // 52 overflow threshold sinh, cosh
};
diff --git a/deps/v8/third_party/fdlibm/fdlibm.h b/deps/v8/third_party/fdlibm/fdlibm.h
index 7985c3a323..cadf85b95a 100644
--- a/deps/v8/third_party/fdlibm/fdlibm.h
+++ b/deps/v8/third_party/fdlibm/fdlibm.h
@@ -23,7 +23,7 @@ int rempio2(double x, double* y);
// Constants to be exposed to builtins via Float64Array.
struct MathConstants {
- static const double constants[45];
+ static const double constants[53];
};
}
} // namespace v8::internal
diff --git a/deps/v8/third_party/fdlibm/fdlibm.js b/deps/v8/third_party/fdlibm/fdlibm.js
index a55b7c70c8..08c6f5e720 100644
--- a/deps/v8/third_party/fdlibm/fdlibm.js
+++ b/deps/v8/third_party/fdlibm/fdlibm.js
@@ -1,7 +1,7 @@
// The following is adapted from fdlibm (http://www.netlib.org/fdlibm),
//
// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+// Copyright (C) 1993-2004 by Sun Microsystems, Inc. All rights reserved.
//
// Developed at SunSoft, a Sun Microsystems, Inc. business.
// Permission to use, copy, modify, and distribute this
@@ -16,8 +16,11 @@
// The following is a straightforward translation of fdlibm routines
// by Raymond Toy (rtoy@google.com).
-
-var kMath; // Initialized to a Float64Array during genesis and is not writable.
+// Double constants that do not have empty lower 32 bits are found in fdlibm.cc
+// and exposed through kMath as typed array. We assume the compiler to convert
+// from decimal to binary accurately enough to produce the intended values.
+// kMath is initialized to a Float64Array during genesis and not writable.
+var kMath;
const INVPIO2 = kMath[0];
const PIO2_1 = kMath[1];
@@ -407,10 +410,8 @@ function MathTan(x) {
// 1 ulp (unit in the last place).
//
// Constants:
-// The hexadecimal values are the intended ones for the following
-// constants. The decimal values may be used, provided that the
-// compiler will convert from decimal to binary accurately enough
-// to produce the hexadecimal values shown.
+// Constants are found in fdlibm.cc. We assume the C++ compiler to convert
+// from decimal to binary accurately enough to produce the intended values.
//
// Note: Assuming log() return accurate answer, the following
// algorithm can be used to compute log1p(x) to within a few ULP:
@@ -425,7 +426,7 @@ const LN2_HI = kMath[34];
const LN2_LO = kMath[35];
const TWO54 = kMath[36];
const TWO_THIRD = kMath[37];
-macro KLOGP1(x)
+macro KLOG1P(x)
(kMath[38+x])
endmacro
@@ -507,12 +508,307 @@ function MathLog1p(x) {
var s = f / (2 + f);
var z = s * s;
- var R = z * (KLOGP1(0) + z * (KLOGP1(1) + z *
- (KLOGP1(2) + z * (KLOGP1(3) + z *
- (KLOGP1(4) + z * (KLOGP1(5) + z * KLOGP1(6)))))));
+ var R = z * (KLOG1P(0) + z * (KLOG1P(1) + z *
+ (KLOG1P(2) + z * (KLOG1P(3) + z *
+ (KLOG1P(4) + z * (KLOG1P(5) + z * KLOG1P(6)))))));
if (k === 0) {
return f - (hfsq - s * (hfsq + R));
} else {
return k * LN2_HI - ((hfsq - (s * (hfsq + R) + (k * LN2_LO + c))) - f);
}
}
+
+// ES6 draft 09-27-13, section 20.2.2.14.
+// Math.expm1
+// Returns exp(x)-1, the exponential of x minus 1.
+//
+// Method
+// 1. Argument reduction:
+// Given x, find r and integer k such that
+//
+// x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
+//
+// Here a correction term c will be computed to compensate
+// the error in r when rounded to a floating-point number.
+//
+// 2. Approximating expm1(r) by a special rational function on
+// the interval [0,0.34658]:
+// Since
+// r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
+// we define R1(r*r) by
+// r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
+// That is,
+// R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+// = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+// = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
+// We use a special Remes algorithm on [0,0.347] to generate
+// a polynomial of degree 5 in r*r to approximate R1. The
+// maximum error of this polynomial approximation is bounded
+// by 2**-61. In other words,
+// R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+// where Q1 = -1.6666666666666567384E-2,
+// Q2 = 3.9682539681370365873E-4,
+// Q3 = -9.9206344733435987357E-6,
+// Q4 = 2.5051361420808517002E-7,
+// Q5 = -6.2843505682382617102E-9;
+// (where z=r*r, and the values of Q1 to Q5 are listed below)
+// with error bounded by
+// | 5 | -61
+// | 1.0+Q1*z+...+Q5*z - R1(z) | <= 2
+// | |
+//
+// expm1(r) = exp(r)-1 is then computed by the following
+// specific way which minimize the accumulation rounding error:
+// 2 3
+// r r [ 3 - (R1 + R1*r/2) ]
+// expm1(r) = r + --- + --- * [--------------------]
+// 2 2 [ 6 - r*(3 - R1*r/2) ]
+//
+// To compensate the error in the argument reduction, we use
+// expm1(r+c) = expm1(r) + c + expm1(r)*c
+// ~ expm1(r) + c + r*c
+// Thus c+r*c will be added in as the correction terms for
+// expm1(r+c). Now rearrange the term to avoid optimization
+// screw up:
+// ( 2 2 )
+// ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
+// expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+// ({ ( 2 [ 6 - r*(3 - R1*r/2) ] ) } 2 )
+// ( )
+//
+// = r - E
+// 3. Scale back to obtain expm1(x):
+// From step 1, we have
+// expm1(x) = either 2^k*[expm1(r)+1] - 1
+// = or 2^k*[expm1(r) + (1-2^-k)]
+// 4. Implementation notes:
+// (A). To save one multiplication, we scale the coefficient Qi
+// to Qi*2^i, and replace z by (x^2)/2.
+// (B). To achieve maximum accuracy, we compute expm1(x) by
+// (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+// (ii) if k=0, return r-E
+// (iii) if k=-1, return 0.5*(r-E)-0.5
+// (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
+// else return 1.0+2.0*(r-E);
+// (v) if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
+// (vi) if k <= 20, return 2^k((1-2^-k)-(E-r)), else
+// (vii) return 2^k(1-((E+2^-k)-r))
+//
+// Special cases:
+// expm1(INF) is INF, expm1(NaN) is NaN;
+// expm1(-INF) is -1, and
+// for finite argument, only expm1(0)=0 is exact.
+//
+// Accuracy:
+// according to an error analysis, the error is always less than
+// 1 ulp (unit in the last place).
+//
+// Misc. info.
+// For IEEE double
+// if x > 7.09782712893383973096e+02 then expm1(x) overflow
+//
+const KEXPM1_OVERFLOW = kMath[45];
+const INVLN2 = kMath[46];
+macro KEXPM1(x)
+(kMath[47+x])
+endmacro
+
+function MathExpm1(x) {
+ x = x * 1; // Convert to number.
+ var y;
+ var hi;
+ var lo;
+ var k;
+ var t;
+ var c;
+
+ var hx = %_DoubleHi(x);
+ var xsb = hx & 0x80000000; // Sign bit of x
+ var y = (xsb === 0) ? x : -x; // y = |x|
+ hx &= 0x7fffffff; // High word of |x|
+
+ // Filter out huge and non-finite argument
+ if (hx >= 0x4043687a) { // if |x| ~=> 56 * ln2
+ if (hx >= 0x40862e42) { // if |x| >= 709.78
+ if (hx >= 0x7ff00000) {
+ // expm1(inf) = inf; expm1(-inf) = -1; expm1(nan) = nan;
+ return (x === -INFINITY) ? -1 : x;
+ }
+ if (x > KEXPM1_OVERFLOW) return INFINITY; // Overflow
+ }
+ if (xsb != 0) return -1; // x < -56 * ln2, return -1.
+ }
+
+ // Argument reduction
+ if (hx > 0x3fd62e42) { // if |x| > 0.5 * ln2
+ if (hx < 0x3ff0a2b2) { // and |x| < 1.5 * ln2
+ if (xsb === 0) {
+ hi = x - LN2_HI;
+ lo = LN2_LO;
+ k = 1;
+ } else {
+ hi = x + LN2_HI;
+ lo = -LN2_LO;
+ k = -1;
+ }
+ } else {
+ k = (INVLN2 * x + ((xsb === 0) ? 0.5 : -0.5)) | 0;
+ t = k;
+ // t * ln2_hi is exact here.
+ hi = x - t * LN2_HI;
+ lo = t * LN2_LO;
+ }
+ x = hi - lo;
+ c = (hi - x) - lo;
+ } else if (hx < 0x3c900000) {
+ // When |x| < 2^-54, we can return x.
+ return x;
+ } else {
+ // Fall through.
+ k = 0;
+ }
+
+ // x is now in primary range
+ var hfx = 0.5 * x;
+ var hxs = x * hfx;
+ var r1 = 1 + hxs * (KEXPM1(0) + hxs * (KEXPM1(1) + hxs *
+ (KEXPM1(2) + hxs * (KEXPM1(3) + hxs * KEXPM1(4)))));
+ t = 3 - r1 * hfx;
+ var e = hxs * ((r1 - t) / (6 - x * t));
+ if (k === 0) { // c is 0
+ return x - (x*e - hxs);
+ } else {
+ e = (x * (e - c) - c);
+ e -= hxs;
+ if (k === -1) return 0.5 * (x - e) - 0.5;
+ if (k === 1) {
+ if (x < -0.25) return -2 * (e - (x + 0.5));
+ return 1 + 2 * (x - e);
+ }
+
+ if (k <= -2 || k > 56) {
+ // suffice to return exp(x) + 1
+ y = 1 - (e - x);
+ // Add k to y's exponent
+ y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
+ return y - 1;
+ }
+ if (k < 20) {
+ // t = 1 - 2^k
+ t = %_ConstructDouble(0x3ff00000 - (0x200000 >> k), 0);
+ y = t - (e - x);
+ // Add k to y's exponent
+ y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
+ } else {
+ // t = 2^-k
+ t = %_ConstructDouble((0x3ff - k) << 20, 0);
+ y = x - (e + t);
+ y += 1;
+ // Add k to y's exponent
+ y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
+ }
+ }
+ return y;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.30.
+// Math.sinh
+// Method :
+// mathematically sinh(x) if defined to be (exp(x)-exp(-x))/2
+// 1. Replace x by |x| (sinh(-x) = -sinh(x)).
+// 2.
+// E + E/(E+1)
+// 0 <= x <= 22 : sinh(x) := --------------, E=expm1(x)
+// 2
+//
+// 22 <= x <= lnovft : sinh(x) := exp(x)/2
+// lnovft <= x <= ln2ovft: sinh(x) := exp(x/2)/2 * exp(x/2)
+// ln2ovft < x : sinh(x) := x*shuge (overflow)
+//
+// Special cases:
+// sinh(x) is |x| if x is +Infinity, -Infinity, or NaN.
+// only sinh(0)=0 is exact for finite x.
+//
+const KSINH_OVERFLOW = kMath[52];
+const TWO_M28 = 3.725290298461914e-9; // 2^-28, empty lower half
+const LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
+
+function MathSinh(x) {
+ x = x * 1; // Convert to number.
+ var h = (x < 0) ? -0.5 : 0.5;
+ // |x| in [0, 22]. return sign(x)*0.5*(E+E/(E+1))
+ var ax = MathAbs(x);
+ if (ax < 22) {
+ // For |x| < 2^-28, sinh(x) = x
+ if (ax < TWO_M28) return x;
+ var t = MathExpm1(ax);
+ if (ax < 1) return h * (2 * t - t * t / (t + 1));
+ return h * (t + t / (t + 1));
+ }
+ // |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
+ if (ax < LOG_MAXD) return h * MathExp(ax);
+ // |x| in [log(maxdouble), overflowthreshold]
+ // overflowthreshold = 710.4758600739426
+ if (ax <= KSINH_OVERFLOW) {
+ var w = MathExp(0.5 * ax);
+ var t = h * w;
+ return t * w;
+ }
+ // |x| > overflowthreshold or is NaN.
+ // Return Infinity of the appropriate sign or NaN.
+ return x * INFINITY;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.12.
+// Math.cosh
+// Method :
+// mathematically cosh(x) if defined to be (exp(x)+exp(-x))/2
+// 1. Replace x by |x| (cosh(x) = cosh(-x)).
+// 2.
+// [ exp(x) - 1 ]^2
+// 0 <= x <= ln2/2 : cosh(x) := 1 + -------------------
+// 2*exp(x)
+//
+// exp(x) + 1/exp(x)
+// ln2/2 <= x <= 22 : cosh(x) := -------------------
+// 2
+// 22 <= x <= lnovft : cosh(x) := exp(x)/2
+// lnovft <= x <= ln2ovft: cosh(x) := exp(x/2)/2 * exp(x/2)
+// ln2ovft < x : cosh(x) := huge*huge (overflow)
+//
+// Special cases:
+// cosh(x) is |x| if x is +INF, -INF, or NaN.
+// only cosh(0)=1 is exact for finite x.
+//
+const KCOSH_OVERFLOW = kMath[52];
+
+function MathCosh(x) {
+ x = x * 1; // Convert to number.
+ var ix = %_DoubleHi(x) & 0x7fffffff;
+ // |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
+ if (ix < 0x3fd62e43) {
+ var t = MathExpm1(MathAbs(x));
+ var w = 1 + t;
+ // For |x| < 2^-55, cosh(x) = 1
+ if (ix < 0x3c800000) return w;
+ return 1 + (t * t) / (w + w);
+ }
+ // |x| in [0.5*log2, 22], return (exp(|x|)+1/exp(|x|)/2
+ if (ix < 0x40360000) {
+ var t = MathExp(MathAbs(x));
+ return 0.5 * t + 0.5 / t;
+ }
+ // |x| in [22, log(maxdouble)], return half*exp(|x|)
+ if (ix < 0x40862e42) return 0.5 * MathExp(MathAbs(x));
+ // |x| in [log(maxdouble), overflowthreshold]
+ if (MathAbs(x) <= KCOSH_OVERFLOW) {
+ var w = MathExp(0.5 * MathAbs(x));
+ var t = 0.5 * w;
+ return t * w;
+ }
+ if (NUMBER_IS_NAN(x)) return x;
+ // |x| > overflowthreshold.
+ return INFINITY;
+}
diff --git a/deps/v8/tools/check-name-clashes.py b/deps/v8/tools/check-name-clashes.py
new file mode 100755
index 0000000000..89a7dee7a1
--- /dev/null
+++ b/deps/v8/tools/check-name-clashes.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import js2c
+import os
+import re
+import sys
+
+FILENAME = "src/runtime/runtime.h"
+LISTHEAD = re.compile(r"#define\s+(\w+LIST\w*)\((\w+)\)")
+LISTBODY = re.compile(r".*\\$")
+BLACKLIST = ['INLINE_FUNCTION_LIST']
+
+
+class Function(object):
+ def __init__(self, match):
+ self.name = match.group(1).strip()
+
+def ListMacroRe(list):
+ macro = LISTHEAD.match(list[0]).group(2)
+ re_string = "\s*%s\((\w+)" % macro
+ return re.compile(re_string)
+
+
+def FindLists(filename):
+ lists = []
+ current_list = []
+ mode = "SEARCHING"
+ with open(filename, "r") as f:
+ for line in f:
+ if mode == "SEARCHING":
+ match = LISTHEAD.match(line)
+ if match and match.group(1) not in BLACKLIST:
+ mode = "APPENDING"
+ current_list.append(line)
+ else:
+ current_list.append(line)
+ match = LISTBODY.match(line)
+ if not match:
+ mode = "SEARCHING"
+ lists.append(current_list)
+ current_list = []
+ return lists
+
+
+# Detects runtime functions by parsing FILENAME.
+def FindRuntimeFunctions():
+ functions = []
+ lists = FindLists(FILENAME)
+ for list in lists:
+ function_re = ListMacroRe(list)
+ for line in list:
+ match = function_re.match(line)
+ if match:
+ functions.append(Function(match))
+ return functions
+
+
+class Builtin(object):
+ def __init__(self, match):
+ self.name = match.group(1)
+
+
+def FindJSNatives():
+ PATH = "src"
+ fileslist = []
+ for (root, dirs, files) in os.walk(PATH):
+ for f in files:
+ if f.endswith(".js"):
+ fileslist.append(os.path.join(root, f))
+ natives = []
+ regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
+ matches = 0
+ for filename in fileslist:
+ with open(filename, "r") as f:
+ file_contents = f.read()
+ file_contents = js2c.ExpandInlineMacros(file_contents)
+ lines = file_contents.split("\n")
+ partial_line = ""
+ for line in lines:
+ if line.startswith("function") and not '{' in line:
+ partial_line += line.rstrip()
+ continue
+ if partial_line:
+ partial_line += " " + line.strip()
+ if '{' in line:
+ line = partial_line
+ partial_line = ""
+ else:
+ continue
+ match = regexp.match(line)
+ if match:
+ natives.append(Builtin(match))
+ return natives
+
+
+def Main():
+ functions = FindRuntimeFunctions()
+ natives = FindJSNatives()
+ errors = 0
+ runtime_map = {}
+ for f in functions:
+ runtime_map[f.name] = 1
+ for b in natives:
+ if b.name in runtime_map:
+ print("JS_Native/Runtime_Function name clash: %s" % b.name)
+ errors += 1
+
+ if errors > 0:
+ return 1
+ print("Runtime/Natives name clashes: checked %d/%d functions, all good." %
+ (len(functions), len(natives)))
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/deps/v8/tools/cpu.sh b/deps/v8/tools/cpu.sh
new file mode 100755
index 0000000000..8e8a243c60
--- /dev/null
+++ b/deps/v8/tools/cpu.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+CPUPATH=/sys/devices/system/cpu
+
+MAXID=$(cat $CPUPATH/present | awk -F- '{print $NF}')
+
+set_governor() {
+ echo "Setting CPU frequency governor to \"$1\""
+ for (( i=0; i<=$MAXID; i++ )); do
+ echo "$1" > $CPUPATH/cpu$i/cpufreq/scaling_governor
+ done
+}
+
+dual_core() {
+ echo "Switching to dual-core mode"
+ for (( i=2; i<=$MAXID; i++ )); do
+ echo 0 > $CPUPATH/cpu$i/online
+ done
+}
+
+single_core() {
+ echo "Switching to single-core mode"
+ for (( i=1; i<=$MAXID; i++ )); do
+ echo 0 > $CPUPATH/cpu$i/online
+ done
+}
+
+
+all_cores() {
+ echo "Reactivating all CPU cores"
+ for (( i=2; i<=$MAXID; i++ )); do
+ echo 1 > $CPUPATH/cpu$i/online
+ done
+}
+
+case "$1" in
+ fast | performance)
+ set_governor "performance"
+ ;;
+ slow | powersave)
+ set_governor "powersave"
+ ;;
+ default | ondemand)
+ set_governor "ondemand"
+ ;;
+ dualcore | dual)
+ dual_core
+ ;;
+ singlecore | single)
+ single_core
+ ;;
+ allcores | all)
+ all_cores
+ ;;
+ *)
+ echo "Usage: $0 fast|slow|default|singlecore|dualcore|all"
+ exit 1
+ ;;
+esac
diff --git a/deps/v8/tools/detect-builtins.js b/deps/v8/tools/detect-builtins.js
new file mode 100644
index 0000000000..2a476baa4b
--- /dev/null
+++ b/deps/v8/tools/detect-builtins.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global) {
+
+ var GetProperties = function(this_name, object) {
+ var result = {};
+ try {
+ var names = Object.getOwnPropertyNames(object);
+ } catch(e) {
+ return;
+ }
+ for (var i = 0; i < names.length; ++i) {
+ var name = names[i];
+ if (typeof object === "function") {
+ if (name === "length" ||
+ name === "name" ||
+ name === "arguments" ||
+ name === "caller" ||
+ name === "prototype") {
+ continue;
+ }
+ }
+ // Avoid endless recursion.
+ if (this_name === "prototype" && name === "constructor") continue;
+ // Could get this from the parent, but having it locally is easier.
+ var property = { "name": name };
+ try {
+ var value = object[name];
+ } catch(e) {
+ property.type = "getter";
+ result[name] = property;
+ continue;
+ }
+ var type = typeof value;
+ property.type = type;
+ if (type === "function") {
+ property.length = value.length;
+ property.prototype = GetProperties("prototype", value.prototype);
+ }
+ property.properties = GetProperties(name, value);
+ result[name] = property;
+ }
+ return result;
+ };
+
+ var g = GetProperties("", global, "");
+ print(JSON.stringify(g, undefined, 2));
+
+})(this); // Must wrap in anonymous closure or it'll detect itself as builtin.
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index b617573d9c..04a1ea87f4 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -61,7 +61,7 @@ consts_misc = [
{ 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
{ 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
- { 'name': 'AsciiStringTag', 'value': 'kOneByteStringTag' },
+ { 'name': 'OneByteStringTag', 'value': 'kOneByteStringTag' },
{ 'name': 'StringRepresentationMask',
'value': 'kStringRepresentationMask' },
@@ -70,6 +70,8 @@ consts_misc = [
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
{ 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
+ { 'name': 'FailureTag', 'value': 'kFailureTag' },
+ { 'name': 'FailureTagMask', 'value': 'kFailureTagMask' },
{ 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
{ 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
@@ -92,6 +94,8 @@ consts_misc = [
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'FIELD' },
+ { 'name': 'prop_type_first_phantom',
+ 'value': 'TRANSITION' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
@@ -116,9 +120,9 @@ consts_misc = [
'value': 'DICTIONARY_ELEMENTS' },
{ 'name': 'bit_field2_elements_kind_mask',
- 'value': 'Map::ElementsKindBits::kMask' },
+ 'value': 'Map::kElementsKindMask' },
{ 'name': 'bit_field2_elements_kind_shift',
- 'value': 'Map::ElementsKindBits::kShift' },
+ 'value': 'Map::kElementsKindShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
@@ -192,9 +196,9 @@ header = '''
* This file is generated by %s. Do not edit directly.
*/
-#include "src/v8.h"
-#include "src/frames.h"
-#include "src/frames-inl.h" /* for architecture-specific frame constants */
+#include "v8.h"
+#include "frames.h"
+#include "frames-inl.h" /* for architecture-specific frame constants */
using namespace v8::internal;
@@ -311,11 +315,11 @@ def load_objects():
#
# Mapping string types is more complicated. Both types and
# class names for Strings specify a representation (e.g., Seq,
- # Cons, External, or Sliced) and an encoding (TwoByte or Ascii),
+ # Cons, External, or Sliced) and an encoding (TwoByte/OneByte),
# In the simplest case, both of these are explicit in both
# names, as in:
#
- # EXTERNAL_ASCII_STRING_TYPE => ExternalAsciiString
+ # EXTERNAL_ONE_BYTE_STRING_TYPE => ExternalOneByteString
#
# However, either the representation or encoding can be omitted
# from the type name, in which case "Seq" and "TwoByte" are
@@ -326,7 +330,7 @@ def load_objects():
# Additionally, sometimes the type name has more information
# than the class, as in:
#
- # CONS_ASCII_STRING_TYPE => ConsString
+ # CONS_ONE_BYTE_STRING_TYPE => ConsString
#
# To figure this out dynamically, we first check for a
# representation and encoding and add them if they're not
@@ -337,19 +341,19 @@ def load_objects():
if (cctype.find('Cons') == -1 and
cctype.find('External') == -1 and
cctype.find('Sliced') == -1):
- if (cctype.find('Ascii') != -1):
- cctype = re.sub('AsciiString$',
+ if (cctype.find('OneByte') != -1):
+ cctype = re.sub('OneByteString$',
'SeqOneByteString', cctype);
else:
cctype = re.sub('String$',
'SeqString', cctype);
- if (cctype.find('Ascii') == -1):
+ if (cctype.find('OneByte') == -1):
cctype = re.sub('String$', 'TwoByteString',
cctype);
if (not (cctype in klasses)):
- cctype = re.sub('Ascii', '', cctype);
+ cctype = re.sub('OneByte', '', cctype);
cctype = re.sub('TwoByte', '', cctype);
#
diff --git a/deps/v8/tools/generate-builtins-tests.py b/deps/v8/tools/generate-builtins-tests.py
new file mode 100755
index 0000000000..4e6961deb1
--- /dev/null
+++ b/deps/v8/tools/generate-builtins-tests.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import optparse
+import os
+import random
+import shutil
+import subprocess
+import sys
+
+
+BLACKLIST = [
+ # Skip special d8 functions.
+ "load", "os", "print", "read", "readline", "quit"
+]
+
+
+def GetRandomObject():
+ return random.choice([
+ "0", "1", "2.5", "0x1000", "\"string\"", "{foo: \"bar\"}", "[1, 2, 3]",
+ "function() { return 0; }"
+ ])
+
+
+g_var_index = 0
+
+
+def GetVars(result, num, first = []):
+ global g_var_index
+ variables = []
+ for i in range(num):
+ variables.append("__v_%d" % g_var_index)
+ g_var_index += 1
+ for var in variables:
+ result.append("var %s = %s;" % (var, GetRandomObject()))
+ return ", ".join(first + variables)
+
+
+# Wraps |string| in try..catch.
+def TryCatch(result, string, exception_behavior = ""):
+ result.append("try { %s } catch(e) { %s }" % (string, exception_behavior))
+
+
+def BuildTests(function, full_name, options):
+ assert function["type"] == "function"
+ global g_var_index
+ g_var_index = 0
+ result = ["// AUTO-GENERATED BY tools/generate-builtins-tests.py.\n"]
+ result.append("// Function call test:")
+ length = function["length"]
+ TryCatch(result, "%s(%s);" % (full_name, GetVars(result, length)))
+
+ if "prototype" in function:
+ proto = function["prototype"]
+ result.append("\n// Constructor test:")
+ TryCatch(result,
+ "var recv = new %s(%s);" % (full_name, GetVars(result, length)),
+ "var recv = new Object();")
+
+ getters = []
+ methods = []
+ for prop in proto:
+ proto_property = proto[prop]
+ proto_property_type = proto_property["type"]
+ if proto_property_type == "getter":
+ getters.append(proto_property)
+ result.append("recv.__defineGetter__(\"%s\", "
+ "function() { return %s; });" %
+ (proto_property["name"], GetVars(result, 1)))
+ if proto_property_type == "number":
+ result.append("recv.__defineGetter__(\"%s\", "
+ "function() { return %s; });" %
+ (proto_property["name"], GetVars(result, 1)))
+ if proto_property_type == "function":
+ methods.append(proto_property)
+ if getters:
+ result.append("\n// Getter tests:")
+ for getter in getters:
+ result.append("print(recv.%s);" % getter["name"])
+ if methods:
+ result.append("\n// Method tests:")
+ for method in methods:
+ args = GetVars(result, method["length"], ["recv"])
+ call = "%s.prototype.%s.call(%s)" % (full_name, method["name"], args)
+ TryCatch(result, call)
+
+ filename = os.path.join(options.outdir, "%s.js" % (full_name))
+ with open(filename, "w") as f:
+ f.write("\n".join(result))
+ f.write("\n")
+
+
+def VisitObject(obj, path, options):
+ obj_type = obj["type"]
+ obj_name = "%s%s" % (path, obj["name"])
+ if obj_type == "function":
+ BuildTests(obj, obj_name, options)
+ if "properties" in obj:
+ for prop_name in obj["properties"]:
+ prop = obj["properties"][prop_name]
+ VisitObject(prop, "%s." % (obj_name), options)
+
+
+def ClearGeneratedFiles(options):
+ if os.path.exists(options.outdir):
+ shutil.rmtree(options.outdir)
+
+
+def GenerateTests(options):
+ ClearGeneratedFiles(options) # Re-generate everything.
+ output = subprocess.check_output(
+ "%s %s" % (options.d8, options.script), shell=True).strip()
+ objects = json.loads(output)
+
+ os.makedirs(options.outdir)
+ for obj_name in objects:
+ if obj_name in BLACKLIST: continue
+ obj = objects[obj_name]
+ VisitObject(obj, "", options)
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("--d8", help="d8 binary to use",
+ default="out/ia32.release/d8")
+ result.add_option("--outdir", help="directory where to place generated tests",
+ default="test/mjsunit/builtins-gen")
+ result.add_option("--script", help="builtins detector script to run in d8",
+ default="tools/detect-builtins.js")
+ return result
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ if len(args) != 1 or args[0] == "help":
+ parser.print_help()
+ return 1
+ action = args[0]
+
+ if action == "generate":
+ GenerateTests(options)
+ return 0
+
+ if action == "clear":
+ ClearGeneratedFiles(options)
+ return 0
+
+ print("Unknown action: %s" % action)
+ parser.print_help()
+ return 1
+
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/deps/v8/tools/generate-runtime-tests.py b/deps/v8/tools/generate-runtime-tests.py
deleted file mode 100755
index b5f61a8422..0000000000
--- a/deps/v8/tools/generate-runtime-tests.py
+++ /dev/null
@@ -1,1412 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import itertools
-import js2c
-import multiprocessing
-import optparse
-import os
-import random
-import re
-import shutil
-import signal
-import string
-import subprocess
-import sys
-import time
-
-FILENAME = "src/runtime.cc"
-HEADERFILENAME = "src/runtime.h"
-FUNCTION = re.compile("^RUNTIME_FUNCTION\(Runtime_(\w+)")
-ARGSLENGTH = re.compile(".*DCHECK\(.*args\.length\(\) == (\d+)\);")
-FUNCTIONEND = "}\n"
-MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
-FIRST_WORD = re.compile("^\s*(.*?)[\s({\[]")
-
-WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
-BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen")
-THIS_SCRIPT = os.path.relpath(sys.argv[0])
-
-# Expand these macros, they define further runtime functions.
-EXPAND_MACROS = [
- "BUFFER_VIEW_GETTER",
- "DATA_VIEW_GETTER",
- "DATA_VIEW_SETTER",
- "RUNTIME_UNARY_MATH",
-]
-# TODO(jkummerow): We could also whitelist the following macros, but the
-# functions they define are so trivial that it's unclear how much benefit
-# that would provide:
-# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-
-# Counts of functions in each detection state. These are used to assert
-# that the parser doesn't bit-rot. Change the values as needed when you add,
-# remove or change runtime functions, but make sure we don't lose our ability
-# to parse them!
-EXPECTED_FUNCTION_COUNT = 429
-EXPECTED_FUZZABLE_COUNT = 332
-EXPECTED_CCTEST_COUNT = 7
-EXPECTED_UNKNOWN_COUNT = 16
-EXPECTED_BUILTINS_COUNT = 808
-
-
-# Don't call these at all.
-BLACKLISTED = [
- "Abort", # Kills the process.
- "AbortJS", # Kills the process.
- "CompileForOnStackReplacement", # Riddled with DCHECK.
- "IS_VAR", # Not implemented in the runtime.
- "ListNatives", # Not available in Release mode.
- "SetAllocationTimeout", # Too slow for fuzzing.
- "SystemBreak", # Kills (int3) the process.
-
- # These are weird. They violate some invariants when called after
- # bootstrapping.
- "DisableAccessChecks",
- "EnableAccessChecks",
-
- # The current LiveEdit implementation relies on and messes with internals
- # in ways that makes it fundamentally unfuzzable :-(
- "DebugGetLoadedScripts",
- "DebugSetScriptSource",
- "LiveEditFindSharedFunctionInfosForScript",
- "LiveEditFunctionSourceUpdated",
- "LiveEditGatherCompileInfo",
- "LiveEditPatchFunctionPositions",
- "LiveEditReplaceFunctionCode",
- "LiveEditReplaceRefToNestedFunction",
- "LiveEditReplaceScript",
- "LiveEditRestartFrame",
- "SetScriptBreakPoint",
-
- # TODO(jkummerow): Fix these and un-blacklist them!
- "CreateDateTimeFormat",
- "CreateNumberFormat",
-
- # TODO(danno): Fix these internal function that are only callable form stubs
- # and un-blacklist them!
- "NumberToString",
- "RxegExpConstructResult",
- "RegExpExec",
- "StringAdd",
- "SubString",
- "StringCompare",
- "StringCharCodeAt",
- "GetFromCache",
-
- # Compilation
- "CompileUnoptimized",
- "CompileOptimized",
- "TryInstallOptimizedCode",
- "NotifyDeoptimized",
- "NotifyStubFailure",
-
- # Utilities
- "AllocateInNewSpace",
- "AllocateInTargetSpace",
- "AllocateHeapNumber",
- "NumberToSmi",
- "NumberToStringSkipCache",
-
- "NewSloppyArguments",
- "NewStrictArguments",
-
- # Harmony
- "CreateJSGeneratorObject",
- "SuspendJSGeneratorObject",
- "ResumeJSGeneratorObject",
- "ThrowGeneratorStateError",
-
- # Arrays
- "ArrayConstructor",
- "InternalArrayConstructor",
- "NormalizeElements",
-
- # Literals
- "MaterializeRegExpLiteral",
- "CreateObjectLiteral",
- "CreateArrayLiteral",
- "CreateArrayLiteralStubBailout",
-
- # Statements
- "NewClosure",
- "NewClosureFromStubFailure",
- "NewObject",
- "NewObjectWithAllocationSite",
- "FinalizeInstanceSize",
- "Throw",
- "ReThrow",
- "ThrowReferenceError",
- "ThrowNotDateError",
- "StackGuard",
- "Interrupt",
- "PromoteScheduledException",
-
- # Contexts
- "NewGlobalContext",
- "NewFunctionContext",
- "PushWithContext",
- "PushCatchContext",
- "PushBlockContext",
- "PushModuleContext",
- "DeleteLookupSlot",
- "LoadLookupSlot",
- "LoadLookupSlotNoReferenceError",
- "StoreLookupSlot",
-
- # Declarations
- "DeclareGlobals",
- "DeclareModules",
- "DeclareContextSlot",
- "InitializeConstGlobal",
- "InitializeConstContextSlot",
-
- # Eval
- "ResolvePossiblyDirectEval",
-
- # Maths
- "MathPowSlow",
- "MathPowRT"
-]
-
-
-# These will always throw.
-THROWS = [
- "CheckExecutionState", # Needs to hit a break point.
- "CheckIsBootstrapping", # Needs to be bootstrapping.
- "DebugEvaluate", # Needs to hit a break point.
- "DebugEvaluateGlobal", # Needs to hit a break point.
- "DebugIndexedInterceptorElementValue", # Needs an indexed interceptor.
- "DebugNamedInterceptorPropertyValue", # Needs a named interceptor.
- "DebugSetScriptSource", # Checks compilation state of script.
- "GetAllScopesDetails", # Needs to hit a break point.
- "GetFrameCount", # Needs to hit a break point.
- "GetFrameDetails", # Needs to hit a break point.
- "GetRootNaN", # Needs to be bootstrapping.
- "GetScopeCount", # Needs to hit a break point.
- "GetScopeDetails", # Needs to hit a break point.
- "GetStepInPositions", # Needs to hit a break point.
- "GetTemplateField", # Needs a {Function,Object}TemplateInfo.
- "GetThreadCount", # Needs to hit a break point.
- "GetThreadDetails", # Needs to hit a break point.
- "IsAccessAllowedForObserver", # Needs access-check-required object.
- "UnblockConcurrentRecompilation" # Needs --block-concurrent-recompilation.
-]
-
-
-# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below.
-_BREAK_ITERATOR = (
- "%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())")
-_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))"
-_DATETIME_FORMAT = (
- "%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))")
-_NUMBER_FORMAT = (
- "%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))")
-
-
-# Custom definitions for function input that does not throw.
# Format: "FunctionName": ["arg0", "arg1", ..., argslength].
# None means "fall back to autodetected value".
# Each non-None entry is a JavaScript source snippet (or int) known to be a
# valid value for that argument position; the _BREAK_ITERATOR etc. constants
# are JS snippets defined earlier in this file.
CUSTOM_KNOWN_GOOD_INPUT = {
  "AddNamedProperty": [None, "\"bla\"", None, None, None],
  "AddPropertyForTemplate": [None, 10, None, None, None],
  "Apply": ["function() {}", None, None, None, None, None],
  "ArrayBufferSliceImpl": [None, None, 0, None],
  "ArrayConcat": ["[1, 'a']", None],
  "BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None],
  "BreakIteratorBreakType": [_BREAK_ITERATOR, None],
  "BreakIteratorCurrent": [_BREAK_ITERATOR, None],
  "BreakIteratorFirst": [_BREAK_ITERATOR, None],
  "BreakIteratorNext": [_BREAK_ITERATOR, None],
  "CompileString": [None, "false", None],
  "CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None],
  "CreateJSFunctionProxy": [None, "function() {}", None, None, None],
  "CreatePrivateSymbol": ["\"foo\"", None],
  "CreatePrivateOwnSymbol": ["\"foo\"", None],
  "CreateSymbol": ["\"foo\"", None],
  "DateParseString": [None, "new Array(8)", None],
  "DefineAccessorPropertyUnchecked": [None, None, "function() {}",
                                      "function() {}", 2, None],
  "FunctionBindArguments": [None, None, "undefined", None, None],
  "GetBreakLocations": [None, 0, None],
  "GetDefaultReceiver": ["function() {}", None],
  "GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None],
  "InternalCompare": [_COLLATOR, None, None, None],
  "InternalDateFormat": [_DATETIME_FORMAT, None, None],
  "InternalDateParse": [_DATETIME_FORMAT, None, None],
  "InternalNumberFormat": [_NUMBER_FORMAT, None, None],
  "InternalNumberParse": [_NUMBER_FORMAT, None, None],
  "IsSloppyModeFunction": ["function() {}", None],
  "LoadMutableDouble": ["{foo: 1.2}", None, None],
  "NewObjectFromBound": ["(function() {}).bind({})", None],
  "NumberToRadixString": [None, "2", None],
  "ParseJson": ["\"{}\"", 1],
  "RegExpExecMultiple": [None, None, "['a']", "['a']", None],
  "DefineApiAccessorProperty": [None, None, "undefined", "undefined", None, None],
  "SetIteratorInitialize": [None, None, "2", None],
  "SetDebugEventListener": ["undefined", None, None],
  "SetFunctionBreakPoint": [None, 218, None, None],
  "StringBuilderConcat": ["[1, 2, 3]", 3, None, None],
  "StringBuilderJoin": ["['a', 'b']", 4, None, None],
  "StringMatch": [None, None, "['a', 'b']", None],
  "StringNormalize": [None, 2, None],
  "StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None],
  "TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None],
  "TypedArrayInitializeFromArrayLike": [None, 6, None, None, None],
  "TypedArraySetFastCases": [None, None, "0", None],
  "FunctionIsArrow": ["() => null", None],
}


# Types of arguments that cannot be generated in a JavaScript testcase.
NON_JS_TYPES = [
  "Code", "Context", "FixedArray", "FunctionTemplateInfo",
  "JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo",
  "SharedFunctionInfo"]
-
-
class Generator(object):
  """Produces JavaScript source snippets that define a variable of a
  requested V8-internal type (see the GENERATORS table at the bottom).

  Every generator method returns a list of JS source lines. Methods consume
  randomness from the module-level |random| state, so output is only
  reproducible for a fixed seed and call order.
  """

  def RandomVariable(self, varname, vartype, simple):
    # |simple|: use the canned known-good literal for |vartype|; otherwise
    # run the type's randomized (possibly recursive) generator.
    if simple:
      return self._Variable(varname, self.GENERATORS[vartype][0])
    return self.GENERATORS[vartype][1](self, varname,
                                       self.DEFAULT_RECURSION_BUDGET)

  @staticmethod
  def IsTypeSupported(typename):
    # True if a generator exists for |typename|.
    return typename in Generator.GENERATORS

  # Property names likely to hit interesting fast/slow paths in V8.
  USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__",
                              "prototype", "0", "1", "-1"]
  # How deeply randomly generated objects may nest other generated objects.
  DEFAULT_RECURSION_BUDGET = 2
  # Trap object for the legacy (pre-ES6) Proxy.create/createFunction API.
  PROXY_TRAPS = """{
    getOwnPropertyDescriptor: function(name) {
      return {value: function() {}, configurable: true, writable: true,
              enumerable: true};
    },
    getPropertyDescriptor: function(name) {
      return {value: function() {}, configurable: true, writable: true,
              enumerable: true};
    },
    getOwnPropertyNames: function() { return []; },
    getPropertyNames: function() { return []; },
    defineProperty: function(name, descriptor) {},
    delete: function(name) { return true; },
    fix: function() {}
  }"""

  def _Variable(self, name, value, fallback=None):
    # Emits "var name = value;". With |fallback|, the assignment is wrapped
    # in try/catch so a throwing initializer still leaves |name| defined.
    args = { "name": name, "value": value, "fallback": fallback }
    if fallback:
      wrapper = "try { %%s } catch(e) { var %(name)s = %(fallback)s; }" % args
    else:
      wrapper = "%s"
    return [wrapper % ("var %(name)s = %(value)s;" % args)]

  def _Boolean(self, name, recursion_budget):
    return self._Variable(name, random.choice(["true", "false"]))

  def _Oddball(self, name, recursion_budget):
    return self._Variable(name,
                          random.choice(["true", "false", "undefined", "null"]))

  def _StrictMode(self, name, recursion_budget):
    # Presumably 0 = sloppy, 1 = strict — confirm against the C++ enum.
    return self._Variable(name, random.choice([0, 1]))

  def _Int32(self, name, recursion_budget=0):
    # Mix of boundary values, small ints, and fully random 32-bit ints.
    die = random.random()
    if die < 0.5:
      value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff,
                             0x40000000, -0x40000000, -0x80000000])
    elif die < 0.75:
      value = random.randint(-1000, 1000)
    else:
      value = random.randint(-0x80000000, 0x7fffffff)
    return self._Variable(name, value)

  def _Uint32(self, name, recursion_budget=0):
    die = random.random()
    if die < 0.5:
      value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000,
                             0x7fffffff, 0xffffffff])
    elif die < 0.75:
      value = random.randint(0, 1000)
    else:
      value = random.randint(0, 0xffffffff)
    return self._Variable(name, value)

  def _Smi(self, name, recursion_budget):
    # Values restricted to the 31-bit range [-0x40000000, 0x3fffffff].
    die = random.random()
    if die < 0.5:
      value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000])
    elif die < 0.75:
      value = random.randint(-1000, 1000)
    else:
      value = random.randint(-0x40000000, 0x3fffffff)
    return self._Variable(name, value)

  def _Number(self, name, recursion_budget):
    die = random.random()
    if die < 0.5:
      return self._Smi(name, recursion_budget)
    elif die < 0.6:
      value = random.choice(["Infinity", "-Infinity", "NaN", "-0",
                             "1.7976931348623157e+308",  # Max value.
                             "2.2250738585072014e-308",  # Min value.
                             "4.9406564584124654e-324"])  # Min subnormal.
    else:
      value = random.lognormvariate(0, 15)
    return self._Variable(name, value)

  def _RawRandomString(self, minlength=0, maxlength=100,
                       alphabet=string.ascii_letters):
    # Returns a raw Python string (not JS source). Uses xrange: Python 2.
    length = random.randint(minlength, maxlength)
    result = ""
    for i in xrange(length):
      result += random.choice(alphabet)
    return result

  def _SeqString(self, name, recursion_budget):
    s1 = self._RawRandomString(1, 5)
    s2 = self._RawRandomString(1, 5)
    # 'foo' + 'bar'
    return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2))

  def _SeqTwoByteString(self, name):
    # NOTE(review): dispatched via GENERATORS, which passes a recursion
    # budget this signature does not accept — confirm whether non-simple
    # "SeqTwoByteString" generation can actually be reached without a
    # TypeError.
    s1 = self._RawRandomString(1, 5)
    s2 = self._RawRandomString(1, 5)
    # 'foo' + unicode + 'bar'
    # NOTE(review): "\2082" reaches JS as a legacy octal escape, not the
    # two-byte char U+2082; presumably "\u2082" was intended — confirm.
    return self._Variable(name, "\"%s\" + \"\\2082\" + \"%s\"" % (s1, s2))

  def _SlicedString(self, name):
    s = self._RawRandomString(20, 30)
    # 'ffoo12345678901234567890'.substr(1)
    return self._Variable(name, "\"%s\".substr(1)" % s)

  def _ConsString(self, name):
    s1 = self._RawRandomString(8, 15)
    s2 = self._RawRandomString(8, 15)
    # 'foo12345' + (function() { return 'bar12345';})()
    return self._Variable(name,
                          "\"%s\" + (function() { return \"%s\";})()" % (s1, s2))

  def _InternalizedString(self, name):
    return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20))

  def _String(self, name, recursion_budget):
    # Picks one of the string representations V8 distinguishes internally.
    # Note: the local |string| shadows the |string| module inside this branch.
    die = random.random()
    if die < 0.5:
      string = random.choice(self.USUAL_SUSPECT_PROPERTIES)
      return self._Variable(name, "\"%s\"" % string)
    elif die < 0.6:
      number_name = name + "_number"
      result = self._Number(number_name, recursion_budget)
      return result + self._Variable(name, "\"\" + %s" % number_name)
    elif die < 0.7:
      return self._SeqString(name, recursion_budget)
    elif die < 0.8:
      return self._ConsString(name)
    elif die < 0.9:
      return self._InternalizedString(name)
    else:
      return self._SlicedString(name)

  def _Symbol(self, name, recursion_budget):
    raw_string_name = name + "_1"
    result = self._String(raw_string_name, recursion_budget)
    return result + self._Variable(name, "Symbol(%s)" % raw_string_name)

  def _Name(self, name, recursion_budget):
    if random.random() < 0.2:
      return self._Symbol(name, recursion_budget)
    return self._String(name, recursion_budget)

  def _JSValue(self, name, recursion_budget):
    # Wrapper objects: new String / new Boolean / new Number.
    die = random.random()
    raw_name = name + "_1"
    if die < 0.33:
      result = self._String(raw_name, recursion_budget)
      return result + self._Variable(name, "new String(%s)" % raw_name)
    elif die < 0.66:
      result = self._Boolean(raw_name, recursion_budget)
      return result + self._Variable(name, "new Boolean(%s)" % raw_name)
    else:
      result = self._Number(raw_name, recursion_budget)
      return result + self._Variable(name, "new Number(%s)" % raw_name)

  def _RawRandomPropertyName(self):
    if random.random() < 0.5:
      return random.choice(self.USUAL_SUSPECT_PROPERTIES)
    return self._RawRandomString(0, 10)

  def _AddProperties(self, name, result, recursion_budget):
    # Appends JS lines to |result| attaching 0-3 random properties to
    # |name|; property names may themselves be generated Name values.
    propcount = random.randint(0, 3)
    propname = None
    for i in range(propcount):
      die = random.random()
      if die < 0.5:
        propname = "%s_prop%d" % (name, i)
        result += self._Name(propname, recursion_budget - 1)
      else:
        propname = "\"%s\"" % self._RawRandomPropertyName()
      propvalue_name = "%s_val%d" % (name, i)
      result += self._Object(propvalue_name, recursion_budget - 1)
      result.append("try { %s[%s] = %s; } catch (e) {}" %
                    (name, propname, propvalue_name))
    if random.random() < 0.2 and propname:
      # Force the object to slow mode.
      result.append("delete %s[%s];" % (name, propname))

  def _RandomElementIndex(self, element_name, result):
    # Returns either a literal int index or the name of a fresh smi
    # variable whose definition is appended to |result|.
    if random.random() < 0.5:
      return random.randint(-1000, 1000)
    result += self._Smi(element_name, 0)
    return element_name

  def _AddElements(self, name, result, recursion_budget):
    elementcount = random.randint(0, 3)
    for i in range(elementcount):
      element_name = "%s_idx%d" % (name, i)
      index = self._RandomElementIndex(element_name, result)
      value_name = "%s_elt%d" % (name, i)
      result += self._Object(value_name, recursion_budget - 1)
      result.append("try { %s[%s] = %s; } catch(e) {}" %
                    (name, index, value_name))

  def _AddAccessors(self, name, result, recursion_budget):
    accessorcount = random.randint(0, 3)
    for i in range(accessorcount):
      propname = self._RawRandomPropertyName()
      what = random.choice(["get", "set"])
      function_name = "%s_access%d" % (name, i)
      result += self._PlainFunction(function_name, recursion_budget - 1)
      result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } "
                    "catch (e) {}" % (name, propname, what, function_name))

  def _PlainArray(self, name, recursion_budget):
    die = random.random()
    if die < 0.5:
      literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]",
                               "['a', 'b', 1, true]"])
      return self._Variable(name, literal)
    else:
      new = random.choice(["", "new "])
      length = random.randint(0, 101000)
      return self._Variable(name, "%sArray(%d)" % (new, length))

  def _PlainObject(self, name, recursion_budget):
    die = random.random()
    if die < 0.67:
      literal_propcount = random.randint(0, 3)
      properties = []
      result = []
      for i in range(literal_propcount):
        propname = self._RawRandomPropertyName()
        propvalue_name = "%s_lit%d" % (name, i)
        result += self._Object(propvalue_name, recursion_budget - 1)
        properties.append("\"%s\": %s" % (propname, propvalue_name))
      return result + self._Variable(name, "{%s}" % ", ".join(properties))
    else:
      return self._Variable(name, "new Object()")

  def _JSArray(self, name, recursion_budget):
    result = self._PlainArray(name, recursion_budget)
    self._AddAccessors(name, result, recursion_budget)
    self._AddProperties(name, result, recursion_budget)
    self._AddElements(name, result, recursion_budget)
    return result

  def _RawRandomBufferLength(self):
    # Small lengths usually; occasionally huge boundary sizes.
    if random.random() < 0.2:
      return random.choice([0, 1, 8, 0x40000000, 0x80000000])
    return random.randint(0, 1000)

  def _JSArrayBuffer(self, name, recursion_budget):
    length = self._RawRandomBufferLength()
    return self._Variable(name, "new ArrayBuffer(%d)" % length)

  def _JSDataView(self, name, recursion_budget):
    buffer_name = name + "_buffer"
    result = self._JSArrayBuffer(buffer_name, recursion_budget)
    args = [buffer_name]
    die = random.random()
    if die < 0.67:
      offset = self._RawRandomBufferLength()
      args.append("%d" % offset)
      if die < 0.33:
        length = self._RawRandomBufferLength()
        args.append("%d" % length)
    result += self._Variable(name, "new DataView(%s)" % ", ".join(args),
                             fallback="new DataView(new ArrayBuffer(8))")
    return result

  def _JSDate(self, name, recursion_budget):
    die = random.random()
    if die < 0.25:
      return self._Variable(name, "new Date()")
    elif die < 0.5:
      ms_name = name + "_ms"
      result = self._Number(ms_name, recursion_budget)
      return result + self._Variable(name, "new Date(%s)" % ms_name)
    elif die < 0.75:
      # Date from a formatted string.
      str_name = name + "_str"
      month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
                             "Aug", "Sep", "Oct", "Nov", "Dec"])
      day = random.randint(1, 28)  # Valid in every month.
      year = random.randint(1900, 2100)
      hour = random.randint(0, 23)
      minute = random.randint(0, 59)
      second = random.randint(0, 59)
      str_value = ("\"%s %s, %s %s:%s:%s\"" %
                   (month, day, year, hour, minute, second))
      result = self._Variable(str_name, str_value)
      return result + self._Variable(name, "new Date(%s)" % str_name)
    else:
      # Date from individually randomized int32 components.
      components = tuple(map(lambda x: "%s_%s" % (name, x),
                             ["y", "m", "d", "h", "min", "s", "ms"]))
      return ([j for i in map(self._Int32, components) for j in i] +
              self._Variable(name, "new Date(%s)" % ", ".join(components)))

  def _PlainFunction(self, name, recursion_budget):
    result_name = "result"
    body = ["function() {"]
    body += self._Object(result_name, recursion_budget - 1)
    body.append("return result;\n}")
    return self._Variable(name, "%s" % "\n".join(body))

  def _JSFunction(self, name, recursion_budget):
    result = self._PlainFunction(name, recursion_budget)
    self._AddAccessors(name, result, recursion_budget)
    self._AddProperties(name, result, recursion_budget)
    self._AddElements(name, result, recursion_budget)
    return result

  def _JSFunctionProxy(self, name, recursion_budget):
    # TODO(jkummerow): Revisit this as the Proxy implementation evolves.
    return self._Variable(name, "Proxy.createFunction(%s, function() {})" %
                          self.PROXY_TRAPS)

  def _JSGeneratorObject(self, name, recursion_budget):
    # TODO(jkummerow): Be more creative here?
    return self._Variable(name, "(function*() { yield 1; })()")

  def _JSMap(self, name, recursion_budget, weak=""):
    # With weak="Weak", builds a WeakMap (whose keys must be objects).
    result = self._Variable(name, "new %sMap()" % weak)
    num_entries = random.randint(0, 3)
    for i in range(num_entries):
      key_name = "%s_k%d" % (name, i)
      value_name = "%s_v%d" % (name, i)
      if weak:
        result += self._JSObject(key_name, recursion_budget - 1)
      else:
        result += self._Object(key_name, recursion_budget - 1)
      result += self._Object(value_name, recursion_budget - 1)
      result.append("%s.set(%s, %s)" % (name, key_name, value_name))
    return result

  def _JSMapIterator(self, name, recursion_budget):
    map_name = name + "_map"
    result = self._JSMap(map_name, recursion_budget)
    iterator_type = random.choice(['keys', 'values', 'entries'])
    return (result + self._Variable(name, "%s.%s()" %
                                    (map_name, iterator_type)))

  def _JSProxy(self, name, recursion_budget):
    # TODO(jkummerow): Revisit this as the Proxy implementation evolves.
    return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS)

  def _JSRegExp(self, name, recursion_budget):
    flags = random.choice(["", "g", "i", "m", "gi"])
    string = "a(b|c)*a"  # TODO(jkummerow): Be more creative here?
    ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"])
    return self._Variable(name, ctor % (string, flags))

  def _JSSet(self, name, recursion_budget, weak=""):
    # With weak="Weak", builds a WeakSet (whose elements must be objects).
    result = self._Variable(name, "new %sSet()" % weak)
    num_entries = random.randint(0, 3)
    for i in range(num_entries):
      element_name = "%s_e%d" % (name, i)
      if weak:
        result += self._JSObject(element_name, recursion_budget - 1)
      else:
        result += self._Object(element_name, recursion_budget - 1)
      result.append("%s.add(%s)" % (name, element_name))
    return result

  def _JSSetIterator(self, name, recursion_budget):
    set_name = name + "_set"
    result = self._JSSet(set_name, recursion_budget)
    iterator_type = random.choice(['values', 'entries'])
    return (result + self._Variable(name, "%s.%s()" %
                                    (set_name, iterator_type)))

  def _JSTypedArray(self, name, recursion_budget):
    # Exercises all four typed-array constructor forms: length, typed
    # array, array-like object, and (buffer[, offset[, length]]).
    arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16",
                               "Uint32", "Float32", "Float64", "Uint8Clamped"])
    ctor_type = random.randint(0, 3)
    if ctor_type == 0:
      length = random.randint(0, 1000)
      return self._Variable(name, "new %sArray(%d)" % (arraytype, length),
                            fallback="new %sArray(8)" % arraytype)
    elif ctor_type == 1:
      input_name = name + "_typedarray"
      result = self._JSTypedArray(input_name, recursion_budget - 1)
      return (result +
              self._Variable(name, "new %sArray(%s)" % (arraytype, input_name),
                             fallback="new %sArray(8)" % arraytype))
    elif ctor_type == 2:
      arraylike_name = name + "_arraylike"
      result = self._JSObject(arraylike_name, recursion_budget - 1)
      length = random.randint(0, 1000)
      result.append("try { %s.length = %d; } catch(e) {}" %
                    (arraylike_name, length))
      return (result +
              self._Variable(name,
                             "new %sArray(%s)" % (arraytype, arraylike_name),
                             fallback="new %sArray(8)" % arraytype))
    else:
      die = random.random()
      buffer_name = name + "_buffer"
      args = [buffer_name]
      result = self._JSArrayBuffer(buffer_name, recursion_budget)
      if die < 0.67:
        offset_name = name + "_offset"
        args.append(offset_name)
        result += self._Int32(offset_name)
        if die < 0.33:
          length_name = name + "_length"
          args.append(length_name)
          result += self._Int32(length_name)
      return (result +
              self._Variable(name,
                             "new %sArray(%s)" % (arraytype, ", ".join(args)),
                             fallback="new %sArray(8)" % arraytype))

  def _JSArrayBufferView(self, name, recursion_budget):
    if random.random() < 0.4:
      return self._JSDataView(name, recursion_budget)
    else:
      return self._JSTypedArray(name, recursion_budget)

  def _JSWeakCollection(self, name, recursion_budget):
    ctor = random.choice([self._JSMap, self._JSSet])
    return ctor(name, recursion_budget, weak="Weak")

  def _PropertyDetails(self, name, recursion_budget):
    # TODO(jkummerow): Be more clever here?
    return self._Int32(name)

  def _JSObject(self, name, recursion_budget):
    die = random.random()
    if die < 0.4:
      function = random.choice([self._PlainObject, self._PlainArray,
                                self._PlainFunction])
    elif die < 0.5:
      return self._Variable(name, "this")  # Global object.
    else:
      function = random.choice([self._JSArrayBuffer, self._JSDataView,
                                self._JSDate, self._JSFunctionProxy,
                                self._JSGeneratorObject, self._JSMap,
                                self._JSMapIterator, self._JSRegExp,
                                self._JSSet, self._JSSetIterator,
                                self._JSTypedArray, self._JSValue,
                                self._JSWeakCollection])
    result = function(name, recursion_budget)
    self._AddAccessors(name, result, recursion_budget)
    self._AddProperties(name, result, recursion_budget)
    self._AddElements(name, result, recursion_budget)
    return result

  def _JSReceiver(self, name, recursion_budget):
    if random.random() < 0.9: return self._JSObject(name, recursion_budget)
    return self._JSProxy(name, recursion_budget)

  def _HeapObject(self, name, recursion_budget):
    die = random.random()
    if die < 0.9: return self._JSReceiver(name, recursion_budget)
    elif die < 0.95: return self._Oddball(name, recursion_budget)
    else: return self._Name(name, recursion_budget)

  def _Object(self, name, recursion_budget):
    # Budget exhausted: fall back to leaf types that recurse no further.
    if recursion_budget <= 0:
      function = random.choice([self._Oddball, self._Number, self._Name,
                                self._JSValue, self._JSRegExp])
      return function(name, recursion_budget)
    if random.random() < 0.2:
      return self._Smi(name, recursion_budget)
    return self._HeapObject(name, recursion_budget)

  # Type name -> [known-good JS literal, randomized generator method].
  GENERATORS = {
    "Boolean": ["true", _Boolean],
    "HeapObject": ["new Object()", _HeapObject],
    "Int32": ["32", _Int32],
    "JSArray": ["new Array()", _JSArray],
    "JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer],
    "JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView],
    "JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView],
    "JSDate": ["new Date()", _JSDate],
    "JSFunction": ["function() {}", _JSFunction],
    "JSFunctionProxy": ["Proxy.createFunction({}, function() {})",
                        _JSFunctionProxy],
    "JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject],
    "JSMap": ["new Map()", _JSMap],
    "JSMapIterator": ["new Map().entries()", _JSMapIterator],
    "JSObject": ["new Object()", _JSObject],
    "JSProxy": ["Proxy.create({})", _JSProxy],
    "JSReceiver": ["new Object()", _JSReceiver],
    "JSRegExp": ["/ab/g", _JSRegExp],
    "JSSet": ["new Set()", _JSSet],
    "JSSetIterator": ["new Set().values()", _JSSetIterator],
    "JSTypedArray": ["new Int32Array(2)", _JSTypedArray],
    "JSValue": ["new String('foo')", _JSValue],
    "JSWeakCollection": ["new WeakMap()", _JSWeakCollection],
    "Name": ["\"name\"", _Name],
    "Number": ["1.5", _Number],
    "Object": ["new Object()", _Object],
    "PropertyDetails": ["513", _PropertyDetails],
    "SeqOneByteString": ["\"seq 1-byte\"", _SeqString],
    "SeqString": ["\"seqstring\"", _SeqString],
    "SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString],
    "Smi": ["1", _Smi],
    "StrictMode": ["1", _StrictMode],
    "String": ["\"foo\"", _String],
    "Symbol": ["Symbol(\"symbol\")", _Symbol],
    "Uint32": ["32", _Uint32],
  }
-
-
class ArgParser(object):
  """Pairs a regex that recognizes one argument-conversion macro with a
  constructor turning its match object into an Arg."""

  def __init__(self, regex, ctor):
    self.ArgCtor = ctor
    self.regex = regex
-
-
class Arg(object):
  """One parsed runtime-function argument: its V8 type name, the JS
  variable name it will be bound to (underscore-prefixed), and its index."""

  def __init__(self, typename, varname, index):
    self.index = index
    self.type = typename
    self.name = "_%s" % varname
-
-
class Function(object):
  """A runtime function discovered in the C++ sources: its name, checked
  argument count, and per-index argument types parsed from the
  CONVERT_*_CHECKED macros inside its body."""

  def __init__(self, match):
    # |match|: a FUNCTION regex match whose group(1) is the function name.
    self.name = match.group(1)
    self.argslength = -1  # Unknown until SetArgsLength() is called.
    self.args = {}  # Maps argument index -> Arg.
    self.inline = ""  # Set to "_" for inline runtime functions (%_Name).

  # One ArgParser per argument-conversion macro flavor; each pairs the
  # macro's regex with a lambda building the corresponding Arg.
  handle_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"),
      lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))

  plain_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"),
      lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))

  number_handle_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"),
      lambda match: Arg("Number", match.group(1), int(match.group(2))))

  smi_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"),
      lambda match: Arg("Smi", match.group(1), int(match.group(2))))

  double_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"),
      lambda match: Arg("Number", match.group(1), int(match.group(2))))

  number_arg_parser = ArgParser(
      re.compile(
          "^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"),
      lambda match: Arg(match.group(2), match.group(1), int(match.group(3))))

  strict_mode_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"),
      lambda match: Arg("StrictMode", match.group(1), int(match.group(2))))

  boolean_arg_parser = ArgParser(
      re.compile("^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"),
      lambda match: Arg("Boolean", match.group(1), int(match.group(2))))

  property_details_parser = ArgParser(
      re.compile("^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"),
      lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2))))

  arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser,
                 smi_arg_parser,
                 double_arg_parser, number_arg_parser, strict_mode_arg_parser,
                 boolean_arg_parser, property_details_parser]

  def SetArgsLength(self, match):
    # |match|: an ARGSLENGTH regex match; group(1) is the checked count.
    self.argslength = int(match.group(1))

  def TryParseArg(self, line):
    """Tries all arg parsers on |line|; records the Arg and returns True on
    the first match, False if no conversion macro was recognized."""
    for parser in Function.arg_parsers:
      match = parser.regex.match(line)
      if match:
        arg = parser.ArgCtor(match)
        self.args[arg.index] = arg
        return True
    return False

  def Filename(self):
    # Name of the generated testcase file for this function.
    return "%s.js" % self.name.lower()

  def __str__(self):
    # Human-readable signature, e.g. "Foo(Smi, <unknown>)".
    s = [self.name, "("]
    argcount = self.argslength
    if argcount < 0:
      print("WARNING: unknown argslength for function %s" % self.name)
      if self.args:
        # Fall back to the highest parsed argument index.
        argcount = max([self.args[i].index + 1 for i in self.args])
      else:
        argcount = 0
    for i in range(argcount):
      if i > 0: s.append(", ")
      s.append(self.args[i].type if i in self.args else "<unknown>")
    s.append(")")
    return "".join(s)
-
-
class Macro(object):
  """A C-preprocessor-style macro: name, formal argument list, and body
  lines collected across backslash-continued source lines."""

  def __init__(self, match):
    # |match|: groups are (name, comma-separated formals, first body line).
    self.name = match.group(1)
    self.args = [piece.strip() for piece in match.group(2).split(",")]
    self.lines = []
    self.indentation = 0
    self.AddLine(match.group(3))

  def AddLine(self, line):
    """Appends one body line, stripping continuations and the macro's
    common leading indentation (detected from the first body line)."""
    if not line:
      return
    if not self.lines:
      # The first body line establishes the indentation depth.
      self.indentation = len(line) - len(line.lstrip())
    line = line.rstrip("\\\n ")
    if not line:
      return
    assert len(line[:self.indentation].strip()) == 0, \
        ("expected whitespace: '%s', full line: '%s'" %
         (line[:self.indentation], line))
    line = line[self.indentation:]
    if not line:
      return
    self.lines.append(line + "\n")

  def Finalize(self):
    """Rewrites every occurrence of each formal into a %(formal)s
    placeholder (word boundaries or ## concatenation markers)."""
    for formal in self.args:
      matcher = re.compile(r"(##|\b)%s(##|\b)" % formal)
      self.lines = [re.sub(matcher, "%%(%s)s" % formal, body)
                    for body in self.lines]

  def FillIn(self, arg_values):
    """Expands the (finalized) body with |arg_values| bound to the
    formals; returns the list of expanded lines."""
    assert len(arg_values) == len(self.args)
    filler = dict(zip(self.args, arg_values))
    return [body % filler for body in self.lines]
-
-
# Parses HEADERFILENAME to find out which runtime functions are "inline".
def FindInlineRuntimeFunctions():
  """Returns the function names listed in the INLINE_FUNCTION_LIST(F) macro
  of HEADERFILENAME.

  Once the macro's opening line is seen, collects every F(name, ...) entry
  until a line no longer ends with a backslash continuation.
  """
  inline_functions = []
  with open(HEADERFILENAME, "r") as f:
    inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n"
    inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?")
    mode = "SEARCHING"
    for line in f:
      if mode == "ACTIVE":
        match = inline_function.match(line)
        if match:
          inline_functions.append(match.group(1))
        if not line.endswith("\\\n"):
          mode = "SEARCHING"  # End of the macro's continuation lines.
      elif mode == "SEARCHING":
        if line == inline_list:
          mode = "ACTIVE"
  return inline_functions
-
-
def ReadFileAndExpandMacros(filename):
  """Reads |filename| and returns its lines with calls to the macros named
  in EXPAND_MACROS expanded in place.

  Macro definitions matching the module-level MACRO regex (possibly
  spanning backslash-continued lines) are collected; any later line whose
  first word is a known macro name is replaced by the filled-in body.
  """
  found_macros = {}
  expanded_lines = []
  with open(filename, "r") as f:
    found_macro = None
    for line in f:
      if found_macro is not None:
        # Still inside a multi-line macro definition.
        found_macro.AddLine(line)
        if not line.endswith("\\\n"):
          found_macro.Finalize()
          found_macro = None
        continue

      match = MACRO.match(line)
      if match:
        found_macro = Macro(match)
        if found_macro.name in EXPAND_MACROS:
          found_macros[found_macro.name] = found_macro
        else:
          found_macro = None  # Not a macro we expand; ignore it.
        continue

      match = FIRST_WORD.match(line)
      if match:
        first_word = match.group(1)
        if first_word in found_macros:
          # Build a call-matching regex for this macro on demand.
          MACRO_CALL = re.compile("%s\(([^)]*)\)" % first_word)
          match = MACRO_CALL.match(line)
          assert match
          args = [s.strip() for s in match.group(1).split(",")]
          expanded_lines += found_macros[first_word].FillIn(args)
          continue

      expanded_lines.append(line)
  return expanded_lines
-
-
# Detects runtime functions by parsing FILENAME.
def FindRuntimeFunctions():
  """Parses FILENAME (after macro expansion) and returns a list of Function
  objects, one per RUNTIME_FUNCTION definition, with argument counts and
  types filled in from the checks found in each function body."""
  inline_functions = FindInlineRuntimeFunctions()
  functions = []
  expanded_lines = ReadFileAndExpandMacros(FILENAME)
  function = None
  partial_line = ""
  for line in expanded_lines:
    # Multi-line definition support, ignoring macros.
    if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
      if line.endswith("\\\n"): continue
      partial_line = line.rstrip()
      continue
    if partial_line:
      # Keep gluing lines until the definition's opening brace.
      partial_line += " " + line.strip()
      if partial_line.endswith("{"):
        line = partial_line
        partial_line = ""
      else:
        continue

    match = FUNCTION.match(line)
    if match:
      function = Function(match)
      if function.name in inline_functions:
        function.inline = "_"  # Inline functions are invoked as %_Name().
      continue
    if function is None: continue

    match = ARGSLENGTH.match(line)
    if match:
      function.SetArgsLength(match)
      continue

    if function.TryParseArg(line):
      continue

    if line == FUNCTIONEND:
      if function is not None:
        functions.append(function)
      function = None
  return functions
-
-
# Hack: This must have the same fields as class Function above, because the
# two are used polymorphically in RunFuzzer(). We could use inheritance...
class Builtin(object):
  """A JavaScript builtin function parsed from a "function Name(args) {"
  line; mirrors Function's field layout."""

  def __init__(self, match):
    # |match|: group(1) is the name, group(2) the raw parameter list text.
    self.name = match.group(1)
    arg_text = match.group(2)
    self.argslength = 0 if arg_text == "" else arg_text.count(",") + 1
    self.inline = ""
    self.args = {}
    for i in range(self.argslength):
      # TODO: use the real parameter names (filter /* comments */ first).
      self.args[i] = Arg("Object", "", i)

  def __str__(self):
    return "%s(%d)" % (self.name, self.argslength)
-
-
def FindJSBuiltins():
  """Scans every .js file under src/ for top-level "function Name(...) {"
  definitions (after expanding inline macros via js2c) and returns them as
  a list of Builtin objects."""
  PATH = "src"
  fileslist = []
  for (root, dirs, files) in os.walk(PATH):
    for f in files:
      if f.endswith(".js"):
        fileslist.append(os.path.join(root, f))
  builtins = []
  regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
  matches = 0  # NOTE(review): unused.
  for filename in fileslist:
    with open(filename, "r") as f:
      file_contents = f.read()
    file_contents = js2c.ExpandInlineMacros(file_contents)
    lines = file_contents.split("\n")
    partial_line = ""
    for line in lines:
      # Glue multi-line function heads back onto one line.
      if line.startswith("function") and not '{' in line:
        partial_line += line.rstrip()
        continue
      if partial_line:
        partial_line += " " + line.strip()
        if '{' in line:
          line = partial_line
          partial_line = ""
        else:
          continue
      match = regexp.match(line)
      if match:
        builtins.append(Builtin(match))
  return builtins
-
-
# Classifies runtime functions.
def ClassifyFunctions(functions):
  """Splits |functions| into (js_fuzzable, cctest_fuzzable, unknown).

  A function is JS-fuzzable when every argument either has a custom
  known-good input or a supported generated type; it moves to the cctest
  bucket when some argument type cannot exist in JavaScript, and to the
  unknown bucket when its arity or an argument type could not be parsed.
  """
  # Can be fuzzed with a JavaScript testcase.
  js_fuzzable_functions = []
  # We have enough information to fuzz these, but they need inputs that
  # cannot be created or passed around in JavaScript.
  cctest_fuzzable_functions = []
  # This script does not have enough information about these.
  unknown_functions = []

  types = {}  # NOTE(review): unused.
  for f in functions:
    if f.name in BLACKLISTED:
      continue
    decision = js_fuzzable_functions
    custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None)
    if f.argslength < 0:
      # Unknown length -> give up unless there's a custom definition.
      if custom and custom[-1] is not None:
        f.argslength = custom[-1]
        assert len(custom) == f.argslength + 1, \
            ("%s: last custom definition must be argslength" % f.name)
      else:
        decision = unknown_functions
    else:
      if custom:
        # Any custom definitions must match the known argslength.
        assert len(custom) == f.argslength + 1, \
            ("%s should have %d custom definitions but has %d" %
             (f.name, f.argslength + 1, len(custom)))
    for i in range(f.argslength):
      if custom and custom[i] is not None:
        # All good, there's a custom definition.
        pass
      elif not i in f.args:
        # No custom definition and no parse result -> give up.
        decision = unknown_functions
      else:
        t = f.args[i].type
        if t in NON_JS_TYPES:
          decision = cctest_fuzzable_functions
        else:
          assert Generator.IsTypeSupported(t), \
              ("type generator not found for %s, function: %s" % (t, f))
    decision.append(f)
  return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions)
-
-
def _GetKnownGoodArgs(function, generator):
  """Returns (definitions, argslist) for |function|: JS variable-definition
  lines and the argument names to pass, using custom known-good inputs
  where available and simple generated values otherwise."""
  custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None)
  definitions = []
  argslist = []
  for i in range(function.argslength):
    if custom_input and custom_input[i] is not None:
      name = "arg%d" % i
      definitions.append("var %s = %s;" % (name, custom_input[i]))
    else:
      arg = function.args[i]
      name = arg.name
      definitions += generator.RandomVariable(name, arg.type, simple=True)
    argslist.append(name)
  return (definitions, argslist)
-
-
-def _GenerateTestcase(function, definitions, argslist, throws):
- s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
- "// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
- "// Flags: --allow-natives-syntax --harmony --harmony-proxies"
- ] + definitions
- call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
- if throws:
- s.append("try {")
- s.append(call);
- s.append("} catch(e) {}")
- else:
- s.append(call)
- testcase = "\n".join(s)
- return testcase
-
-
def GenerateJSTestcaseForFunction(function):
  """Writes a generated known-good testcase for |function| into BASEPATH;
  the call is try/catch-wrapped when the function is listed in THROWS."""
  gen = Generator()
  (definitions, argslist) = _GetKnownGoodArgs(function, gen)
  testcase = _GenerateTestcase(function, definitions, argslist,
                               function.name in THROWS)
  path = os.path.join(BASEPATH, function.Filename())
  with open(path, "w") as f:
    f.write("%s\n" % testcase)
-
-
def GenerateTestcases(functions):
  """Deletes BASEPATH and regenerates one testcase file per function."""
  shutil.rmtree(BASEPATH)  # Re-generate everything.
  os.makedirs(BASEPATH)
  for f in functions:
    GenerateJSTestcaseForFunction(f)
-
-
-def _SaveFileName(save_path, process_id, save_file_index):
- return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index)
-
-
def _GetFuzzableRuntimeFunctions():
  """Returns only the runtime functions classified as fuzzable from JS."""
  functions = FindRuntimeFunctions()
  (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
      ClassifyFunctions(functions)
  return js_fuzzable_functions
-
-
# Maps the --fuzz-target option value to a zero-argument callable that
# produces the list of fuzz targets (Function or Builtin objects).
FUZZ_TARGET_LISTS = {
  "runtime": _GetFuzzableRuntimeFunctions,
  "builtins": FindJSBuiltins,
}
-
-
def RunFuzzer(process_id, options, stop_running):
  """Worker loop: generates random testcases, runs them in options.binary,
  and copies any test that crashed (non-zero exit, not a timeout, not an
  OOM) into options.save_path.

  |process_id| distinguishes parallel workers' scratch files in /dev/shm;
  |stop_running| is a shared event used to stop all workers on Ctrl-C.
  """
  MAX_SLEEP_TIME = 0.1
  INITIAL_SLEEP_TIME = 0.001
  SLEEP_TIME_FACTOR = 1.25
  base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id
  test_file_name = "%s.js" % base_file_name
  stderr_file_name = "%s.out" % base_file_name
  save_file_index = 0
  # Continue numbering after files saved by previous runs.
  while os.path.exists(_SaveFileName(options.save_path, process_id,
                                     save_file_index)):
    save_file_index += 1

  targets = FUZZ_TARGET_LISTS[options.fuzz_target]()
  try:
    for i in range(options.num_tests):
      if stop_running.is_set(): break
      # Pick a target that takes at least one argument.
      function = None
      while function is None or function.argslength == 0:
        function = random.choice(targets)
      args = []
      definitions = []
      gen = Generator()
      # NOTE(review): this inner loop reuses |i| from the outer loop.
      for i in range(function.argslength):
        arg = function.args[i]
        argname = "arg%d%s" % (i, arg.name)
        args.append(argname)
        definitions += gen.RandomVariable(argname, arg.type, simple=False)
      testcase = _GenerateTestcase(function, definitions, args, True)
      with open(test_file_name, "w") as f:
        f.write("%s\n" % testcase)
      with open("/dev/null", "w") as devnull:
        with open(stderr_file_name, "w") as stderr:
          process = subprocess.Popen(
              [options.binary, "--allow-natives-syntax", "--harmony",
               "--harmony-proxies", "--enable-slow-asserts", test_file_name],
              stdout=devnull, stderr=stderr)
          end_time = time.time() + options.timeout
          timed_out = False
          exit_code = None
          sleep_time = INITIAL_SLEEP_TIME
          # Poll with exponential backoff until exit or timeout.
          while exit_code is None:
            if time.time() >= end_time:
              # Kill the process and wait for it to exit.
              os.kill(process.pid, signal.SIGTERM)
              exit_code = process.wait()
              timed_out = True
            else:
              exit_code = process.poll()
              time.sleep(sleep_time)
              sleep_time = sleep_time * SLEEP_TIME_FACTOR
              if sleep_time > MAX_SLEEP_TIME:
                sleep_time = MAX_SLEEP_TIME
      if exit_code != 0 and not timed_out:
        # Out-of-memory failures are expected noise; don't save those.
        oom = False
        with open(stderr_file_name, "r") as stderr:
          for line in stderr:
            if line.strip() == "# Allocation failed - process out of memory":
              oom = True
              break
        if oom: continue
        save_name = _SaveFileName(options.save_path, process_id,
                                  save_file_index)
        shutil.copyfile(test_file_name, save_name)
        save_file_index += 1
  except KeyboardInterrupt:
    stop_running.set()
  finally:
    # Always clean up this worker's scratch files.
    if os.path.exists(test_file_name):
      os.remove(test_file_name)
    if os.path.exists(stderr_file_name):
      os.remove(stderr_file_name)
-
-
-def BuildOptionParser():
- usage = """Usage: %%prog [options] ACTION
-
-where ACTION can be:
-
-info Print diagnostic info.
-check Check that runtime functions can be parsed as expected, and that
- test cases exist.
-generate Parse source code for runtime functions, and auto-generate
- test cases for them. Warning: this will nuke and re-create
- %(path)s.
-fuzz Generate fuzz tests, run them, save those that crashed (see options).
-""" % {"path": os.path.relpath(BASEPATH)}
-
- o = optparse.OptionParser(usage=usage)
- o.add_option("--binary", default="out/x64.debug/d8",
- help="d8 binary used for running fuzz tests (default: %default)")
- o.add_option("--fuzz-target", default="runtime",
- help="Set of functions targeted by fuzzing. Allowed values: "
- "%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS))
- o.add_option("-n", "--num-tests", default=1000, type="int",
- help="Number of fuzz tests to generate per worker process"
- " (default: %default)")
- o.add_option("--save-path", default="~/runtime_fuzz_output",
- help="Path to directory where failing tests will be stored"
- " (default: %default)")
- o.add_option("--timeout", default=20, type="int",
- help="Timeout for each fuzz test (in seconds, default:"
- "%default)")
- return o
-
-
-def ProcessOptions(options, args):
- options.save_path = os.path.expanduser(options.save_path)
- if options.fuzz_target not in FUZZ_TARGET_LISTS:
- print("Invalid fuzz target: %s" % options.fuzz_target)
- return False
- if len(args) != 1 or args[0] == "help":
- return False
- return True
-
-
-def Main():
- parser = BuildOptionParser()
- (options, args) = parser.parse_args()
-
- if not ProcessOptions(options, args):
- parser.print_help()
- return 1
- action = args[0]
-
- functions = FindRuntimeFunctions()
- (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
- ClassifyFunctions(functions)
- builtins = FindJSBuiltins()
-
- if action == "test":
- print("put your temporary debugging code here")
- return 0
-
- if action == "info":
- print("%d functions total; js_fuzzable_functions: %d, "
- "cctest_fuzzable_functions: %d, unknown_functions: %d"
- % (len(functions), len(js_fuzzable_functions),
- len(cctest_fuzzable_functions), len(unknown_functions)))
- print("%d JavaScript builtins" % len(builtins))
- print("unknown functions:")
- for f in unknown_functions:
- print(f)
- return 0
-
- if action == "check":
- errors = 0
-
- def CheckCount(actual, expected, description):
- if len(actual) != expected:
- print("Expected to detect %d %s, but found %d." % (
- expected, description, len(actual)))
- print("If this change is intentional, please update the expectations"
- " at the top of %s." % THIS_SCRIPT)
- return 1
- return 0
-
- errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT,
- "functions in total")
- errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT,
- "JavaScript-fuzzable functions")
- errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT,
- "cctest-fuzzable functions")
- errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT,
- "functions with incomplete type information")
- errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT,
- "JavaScript builtins")
-
- def CheckTestcasesExisting(functions):
- errors = 0
- for f in functions:
- if not os.path.isfile(os.path.join(BASEPATH, f.Filename())):
- print("Missing testcase for %s, please run '%s generate'" %
- (f.name, THIS_SCRIPT))
- errors += 1
- files = filter(lambda filename: not filename.startswith("."),
- os.listdir(BASEPATH))
- if (len(files) != len(functions)):
- unexpected_files = set(files) - set([f.Filename() for f in functions])
- for f in unexpected_files:
- print("Unexpected testcase: %s" % os.path.join(BASEPATH, f))
- errors += 1
- print("Run '%s generate' to automatically clean these up."
- % THIS_SCRIPT)
- return errors
-
- errors += CheckTestcasesExisting(js_fuzzable_functions)
-
- def CheckNameClashes(runtime_functions, builtins):
- errors = 0
- runtime_map = {}
- for f in runtime_functions:
- runtime_map[f.name] = 1
- for b in builtins:
- if b.name in runtime_map:
- print("Builtin/Runtime_Function name clash: %s" % b.name)
- errors += 1
- return errors
-
- errors += CheckNameClashes(functions, builtins)
-
- if errors > 0:
- return 1
- print("Generated runtime tests: all good.")
- return 0
-
- if action == "generate":
- GenerateTestcases(js_fuzzable_functions)
- return 0
-
- if action == "fuzz":
- processes = []
- if not os.path.isdir(options.save_path):
- os.makedirs(options.save_path)
- stop_running = multiprocessing.Event()
- for i in range(multiprocessing.cpu_count()):
- args = (i, options, stop_running)
- p = multiprocessing.Process(target=RunFuzzer, args=args)
- p.start()
- processes.append(p)
- try:
- for i in range(len(processes)):
- processes[i].join()
- except KeyboardInterrupt:
- stop_running.set()
- for i in range(len(processes)):
- processes[i].join()
- return 0
-
-if __name__ == "__main__":
- sys.exit(Main())
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index f6c45fce4b..2177ec2122 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1482,22 +1482,22 @@ class Code(HeapObject):
class V8Heap(object):
CLASS_MAP = {
"SYMBOL_TYPE": SeqString,
- "ASCII_SYMBOL_TYPE": SeqString,
+ "ONE_BYTE_SYMBOL_TYPE": SeqString,
"CONS_SYMBOL_TYPE": ConsString,
- "CONS_ASCII_SYMBOL_TYPE": ConsString,
+ "CONS_ONE_BYTE_SYMBOL_TYPE": ConsString,
"EXTERNAL_SYMBOL_TYPE": ExternalString,
- "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
- "EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
+ "EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+ "EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
"SHORT_EXTERNAL_SYMBOL_TYPE": ExternalString,
- "SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
- "SHORT_EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
+ "SHORT_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+ "SHORT_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
"STRING_TYPE": SeqString,
- "ASCII_STRING_TYPE": SeqString,
+ "ONE_BYTE_STRING_TYPE": SeqString,
"CONS_STRING_TYPE": ConsString,
- "CONS_ASCII_STRING_TYPE": ConsString,
+ "CONS_ONE_BYTE_STRING_TYPE": ConsString,
"EXTERNAL_STRING_TYPE": ExternalString,
- "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE": ExternalString,
- "EXTERNAL_ASCII_STRING_TYPE": ExternalString,
+ "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+ "EXTERNAL_ONE_BYTE_STRING_TYPE": ExternalString,
"MAP_TYPE": Map,
"ODDBALL_TYPE": Oddball,
"FIXED_ARRAY_TYPE": FixedArray,
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index d907725230..ee391c31c2 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -55,11 +55,23 @@
# to appear before libv8_snapshot.a so it's listed explicitly.
'dependencies': ['v8_base', 'v8_nosnapshot'],
}],
- ['v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+ ['v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ }, {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ }],
+ ],
}],
- ['v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
- 'dependencies': ['v8_base', 'v8_external_snapshot#host'],
+ ['v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
}],
['component=="shared_library"', {
'type': '<(component)',
@@ -218,11 +230,11 @@
'type': 'static_library',
'conditions': [
['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
+ 'toolsets': ['host', 'target'],
'dependencies': [
'mksnapshot#host',
'js2c#host',
- 'natives_blob#host',
+ 'natives_blob',
]}, {
'toolsets': ['target'],
'dependencies': [
@@ -260,9 +272,27 @@
'inputs': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- '<(PRODUCT_DIR)/snapshot_blob.bin',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ }, {
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ }],
],
'variables': {
'mksnapshot_flags': [
@@ -317,6 +347,12 @@
'../../src/ast-value-factory.h',
'../../src/ast.cc',
'../../src/ast.h',
+ '../../src/background-parsing-task.cc',
+ '../../src/background-parsing-task.h',
+ '../../src/bailout-reason.cc',
+ '../../src/bailout-reason.h',
+ '../../src/basic-block-profiler.cc',
+ '../../src/basic-block-profiler.h',
'../../src/bignum-dtoa.cc',
'../../src/bignum-dtoa.h',
'../../src/bignum.cc',
@@ -334,6 +370,8 @@
'../../src/checks.h',
'../../src/circular-queue-inl.h',
'../../src/circular-queue.h',
+ '../../src/code-factory.cc',
+ '../../src/code-factory.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
'../../src/code-stubs-hydrogen.cc',
@@ -342,14 +380,19 @@
'../../src/codegen.h',
'../../src/compilation-cache.cc',
'../../src/compilation-cache.h',
+ '../../src/compiler/access-builder.cc',
+ '../../src/compiler/access-builder.h',
'../../src/compiler/ast-graph-builder.cc',
'../../src/compiler/ast-graph-builder.h',
+ '../../src/compiler/basic-block-instrumentor.cc',
+ '../../src/compiler/basic-block-instrumentor.h',
'../../src/compiler/change-lowering.cc',
'../../src/compiler/change-lowering.h',
'../../src/compiler/code-generator-impl.h',
'../../src/compiler/code-generator.cc',
'../../src/compiler/code-generator.h',
'../../src/compiler/common-node-cache.h',
+ '../../src/compiler/common-operator.cc',
'../../src/compiler/common-operator.h',
'../../src/compiler/control-builders.cc',
'../../src/compiler/control-builders.h',
@@ -378,24 +421,27 @@
'../../src/compiler/instruction-selector.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
+ '../../src/compiler/js-builtin-reducer.cc',
+ '../../src/compiler/js-builtin-reducer.h',
'../../src/compiler/js-context-specialization.cc',
'../../src/compiler/js-context-specialization.h',
'../../src/compiler/js-generic-lowering.cc',
'../../src/compiler/js-generic-lowering.h',
'../../src/compiler/js-graph.cc',
'../../src/compiler/js-graph.h',
+ '../../src/compiler/js-inlining.cc',
+ '../../src/compiler/js-inlining.h',
'../../src/compiler/js-operator.h',
'../../src/compiler/js-typed-lowering.cc',
'../../src/compiler/js-typed-lowering.h',
'../../src/compiler/linkage-impl.h',
'../../src/compiler/linkage.cc',
'../../src/compiler/linkage.h',
- '../../src/compiler/lowering-builder.cc',
- '../../src/compiler/lowering-builder.h',
- '../../src/compiler/machine-node-factory.h',
'../../src/compiler/machine-operator-reducer.cc',
'../../src/compiler/machine-operator-reducer.h',
+ '../../src/compiler/machine-operator.cc',
'../../src/compiler/machine-operator.h',
+ '../../src/compiler/machine-type.cc',
'../../src/compiler/machine-type.h',
'../../src/compiler/node-aux-data-inl.h',
'../../src/compiler/node-aux-data.h',
@@ -409,6 +455,7 @@
'../../src/compiler/opcodes.h',
'../../src/compiler/operator-properties-inl.h',
'../../src/compiler/operator-properties.h',
+ '../../src/compiler/operator.cc',
'../../src/compiler/operator.h',
'../../src/compiler/phi-reducer.h',
'../../src/compiler/pipeline.cc',
@@ -424,14 +471,16 @@
'../../src/compiler/scheduler.h',
'../../src/compiler/simplified-lowering.cc',
'../../src/compiler/simplified-lowering.h',
- '../../src/compiler/simplified-node-factory.h',
+ '../../src/compiler/simplified-operator-reducer.cc',
+ '../../src/compiler/simplified-operator-reducer.h',
+ '../../src/compiler/simplified-operator.cc',
'../../src/compiler/simplified-operator.h',
'../../src/compiler/source-position.cc',
'../../src/compiler/source-position.h',
- '../../src/compiler/structured-machine-assembler.cc',
- '../../src/compiler/structured-machine-assembler.h',
'../../src/compiler/typer.cc',
'../../src/compiler/typer.h',
+ '../../src/compiler/value-numbering-reducer.cc',
+ '../../src/compiler/value-numbering-reducer.h',
'../../src/compiler/verifier.cc',
'../../src/compiler/verifier.h',
'../../src/compiler.cc',
@@ -487,7 +536,6 @@
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
'../../src/feedback-slots.h',
- '../../src/field-index.cc',
'../../src/field-index.h',
'../../src/field-index-inl.h',
'../../src/fixed-dtoa.cc',
@@ -516,6 +564,8 @@
'../../src/heap-snapshot-generator-inl.h',
'../../src/heap-snapshot-generator.cc',
'../../src/heap-snapshot-generator.h',
+ '../../src/heap/gc-idle-time-handler.cc',
+ '../../src/heap/gc-idle-time-handler.h',
'../../src/heap/gc-tracer.cc',
'../../src/heap/gc-tracer.h',
'../../src/heap/heap-inl.h',
@@ -594,11 +644,23 @@
'../../src/i18n.h',
'../../src/icu_util.cc',
'../../src/icu_util.h',
- '../../src/ic-inl.h',
- '../../src/ic.cc',
- '../../src/ic.h',
+ '../../src/ic/access-compiler.cc',
+ '../../src/ic/access-compiler.h',
+ '../../src/ic/call-optimization.cc',
+ '../../src/ic/call-optimization.h',
+ '../../src/ic/handler-compiler.cc',
+ '../../src/ic/handler-compiler.h',
+ '../../src/ic/ic-inl.h',
+ '../../src/ic/ic-state.cc',
+ '../../src/ic/ic-state.h',
+ '../../src/ic/ic.cc',
+ '../../src/ic/ic.h',
+ '../../src/ic/ic-compiler.cc',
+ '../../src/ic/ic-compiler.h',
'../../src/interface.cc',
'../../src/interface.h',
+ '../../src/interface-descriptors.cc',
+ '../../src/interface-descriptors.h',
'../../src/interpreter-irregexp.cc',
'../../src/interpreter-irregexp.h',
'../../src/isolate.cc',
@@ -673,8 +735,21 @@
'../../src/rewriter.h',
'../../src/runtime-profiler.cc',
'../../src/runtime-profiler.h',
- '../../src/runtime.cc',
- '../../src/runtime.h',
+ '../../src/runtime/runtime-collections.cc',
+ '../../src/runtime/runtime-compiler.cc',
+ '../../src/runtime/runtime-i18n.cc',
+ '../../src/runtime/runtime-json.cc',
+ '../../src/runtime/runtime-maths.cc',
+ '../../src/runtime/runtime-numbers.cc',
+ '../../src/runtime/runtime-regexp.cc',
+ '../../src/runtime/runtime-strings.cc',
+ '../../src/runtime/runtime-test.cc',
+ '../../src/runtime/runtime-typedarray.cc',
+ '../../src/runtime/runtime-uri.cc',
+ '../../src/runtime/runtime-utils.h',
+ '../../src/runtime/runtime.cc',
+ '../../src/runtime/runtime.h',
+ '../../src/runtime/string-builder.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
'../../src/sampler.cc',
@@ -700,13 +775,16 @@
'../../src/string-stream.h',
'../../src/strtod.cc',
'../../src/strtod.h',
- '../../src/stub-cache.cc',
- '../../src/stub-cache.h',
+ '../../src/ic/stub-cache.cc',
+ '../../src/ic/stub-cache.h',
'../../src/token.cc',
'../../src/token.h',
'../../src/transitions-inl.h',
'../../src/transitions.cc',
'../../src/transitions.h',
+ '../../src/type-feedback-vector-inl.h',
+ '../../src/type-feedback-vector.cc',
+ '../../src/type-feedback-vector.h',
'../../src/type-info.cc',
'../../src/type-info.h',
'../../src/types-inl.h',
@@ -767,7 +845,8 @@
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
'../../src/arm/full-codegen-arm.cc',
- '../../src/arm/ic-arm.cc',
+ '../../src/arm/interface-descriptors-arm.cc',
+ '../../src/arm/interface-descriptors-arm.h',
'../../src/arm/lithium-arm.cc',
'../../src/arm/lithium-arm.h',
'../../src/arm/lithium-codegen-arm.cc',
@@ -779,11 +858,15 @@
'../../src/arm/regexp-macro-assembler-arm.cc',
'../../src/arm/regexp-macro-assembler-arm.h',
'../../src/arm/simulator-arm.cc',
- '../../src/arm/stub-cache-arm.cc',
'../../src/compiler/arm/code-generator-arm.cc',
'../../src/compiler/arm/instruction-codes-arm.h',
'../../src/compiler/arm/instruction-selector-arm.cc',
'../../src/compiler/arm/linkage-arm.cc',
+ '../../src/ic/arm/access-compiler-arm.cc',
+ '../../src/ic/arm/handler-compiler-arm.cc',
+ '../../src/ic/arm/ic-arm.cc',
+ '../../src/ic/arm/ic-compiler-arm.cc',
+ '../../src/ic/arm/stub-cache-arm.cc',
],
}],
['v8_target_arch=="arm64"', {
@@ -811,11 +894,12 @@
'../../src/arm64/frames-arm64.cc',
'../../src/arm64/frames-arm64.h',
'../../src/arm64/full-codegen-arm64.cc',
- '../../src/arm64/ic-arm64.cc',
'../../src/arm64/instructions-arm64.cc',
'../../src/arm64/instructions-arm64.h',
'../../src/arm64/instrument-arm64.cc',
'../../src/arm64/instrument-arm64.h',
+ '../../src/arm64/interface-descriptors-arm64.cc',
+ '../../src/arm64/interface-descriptors-arm64.h',
'../../src/arm64/lithium-arm64.cc',
'../../src/arm64/lithium-arm64.h',
'../../src/arm64/lithium-codegen-arm64.cc',
@@ -829,13 +913,17 @@
'../../src/arm64/regexp-macro-assembler-arm64.h',
'../../src/arm64/simulator-arm64.cc',
'../../src/arm64/simulator-arm64.h',
- '../../src/arm64/stub-cache-arm64.cc',
'../../src/arm64/utils-arm64.cc',
'../../src/arm64/utils-arm64.h',
'../../src/compiler/arm64/code-generator-arm64.cc',
'../../src/compiler/arm64/instruction-codes-arm64.h',
'../../src/compiler/arm64/instruction-selector-arm64.cc',
'../../src/compiler/arm64/linkage-arm64.cc',
+ '../../src/ic/arm64/access-compiler-arm64.cc',
+ '../../src/ic/arm64/handler-compiler-arm64.cc',
+ '../../src/ic/arm64/ic-arm64.cc',
+ '../../src/ic/arm64/ic-compiler-arm64.cc',
+ '../../src/ic/arm64/stub-cache-arm64.cc',
],
}],
['v8_target_arch=="ia32"', {
@@ -855,7 +943,7 @@
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
'../../src/ia32/full-codegen-ia32.cc',
- '../../src/ia32/ic-ia32.cc',
+ '../../src/ia32/interface-descriptors-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.h',
'../../src/ia32/lithium-gap-resolver-ia32.cc',
@@ -866,11 +954,15 @@
'../../src/ia32/macro-assembler-ia32.h',
'../../src/ia32/regexp-macro-assembler-ia32.cc',
'../../src/ia32/regexp-macro-assembler-ia32.h',
- '../../src/ia32/stub-cache-ia32.cc',
'../../src/compiler/ia32/code-generator-ia32.cc',
'../../src/compiler/ia32/instruction-codes-ia32.h',
'../../src/compiler/ia32/instruction-selector-ia32.cc',
'../../src/compiler/ia32/linkage-ia32.cc',
+ '../../src/ic/ia32/access-compiler-ia32.cc',
+ '../../src/ic/ia32/handler-compiler-ia32.cc',
+ '../../src/ic/ia32/ic-ia32.cc',
+ '../../src/ic/ia32/ic-compiler-ia32.cc',
+ '../../src/ic/ia32/stub-cache-ia32.cc',
],
}],
['v8_target_arch=="x87"', {
@@ -890,7 +982,7 @@
'../../src/x87/frames-x87.cc',
'../../src/x87/frames-x87.h',
'../../src/x87/full-codegen-x87.cc',
- '../../src/x87/ic-x87.cc',
+ '../../src/x87/interface-descriptors-x87.cc',
'../../src/x87/lithium-codegen-x87.cc',
'../../src/x87/lithium-codegen-x87.h',
'../../src/x87/lithium-gap-resolver-x87.cc',
@@ -901,7 +993,11 @@
'../../src/x87/macro-assembler-x87.h',
'../../src/x87/regexp-macro-assembler-x87.cc',
'../../src/x87/regexp-macro-assembler-x87.h',
- '../../src/x87/stub-cache-x87.cc',
+ '../../src/ic/x87/access-compiler-x87.cc',
+ '../../src/ic/x87/handler-compiler-x87.cc',
+ '../../src/ic/x87/ic-x87.cc',
+ '../../src/ic/x87/ic-compiler-x87.cc',
+ '../../src/ic/x87/stub-cache-x87.cc',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
@@ -923,7 +1019,7 @@
'../../src/mips/frames-mips.cc',
'../../src/mips/frames-mips.h',
'../../src/mips/full-codegen-mips.cc',
- '../../src/mips/ic-mips.cc',
+ '../../src/mips/interface-descriptors-mips.cc',
'../../src/mips/lithium-codegen-mips.cc',
'../../src/mips/lithium-codegen-mips.h',
'../../src/mips/lithium-gap-resolver-mips.cc',
@@ -935,7 +1031,11 @@
'../../src/mips/regexp-macro-assembler-mips.cc',
'../../src/mips/regexp-macro-assembler-mips.h',
'../../src/mips/simulator-mips.cc',
- '../../src/mips/stub-cache-mips.cc',
+ '../../src/ic/mips/access-compiler-mips.cc',
+ '../../src/ic/mips/handler-compiler-mips.cc',
+ '../../src/ic/mips/ic-mips.cc',
+ '../../src/ic/mips/ic-compiler-mips.cc',
+ '../../src/ic/mips/stub-cache-mips.cc',
],
}],
['v8_target_arch=="mips64el"', {
@@ -957,7 +1057,7 @@
'../../src/mips64/frames-mips64.cc',
'../../src/mips64/frames-mips64.h',
'../../src/mips64/full-codegen-mips64.cc',
- '../../src/mips64/ic-mips64.cc',
+ '../../src/mips64/interface-descriptors-mips64.cc',
'../../src/mips64/lithium-codegen-mips64.cc',
'../../src/mips64/lithium-codegen-mips64.h',
'../../src/mips64/lithium-gap-resolver-mips64.cc',
@@ -969,7 +1069,11 @@
'../../src/mips64/regexp-macro-assembler-mips64.cc',
'../../src/mips64/regexp-macro-assembler-mips64.h',
'../../src/mips64/simulator-mips64.cc',
- '../../src/mips64/stub-cache-mips64.cc',
+ '../../src/ic/mips64/access-compiler-mips64.cc',
+ '../../src/ic/mips64/handler-compiler-mips64.cc',
+ '../../src/ic/mips64/ic-mips64.cc',
+ '../../src/ic/mips64/ic-compiler-mips64.cc',
+ '../../src/ic/mips64/stub-cache-mips64.cc',
],
}],
['v8_target_arch=="x64" or v8_target_arch=="x32"', {
@@ -989,7 +1093,7 @@
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
'../../src/x64/full-codegen-x64.cc',
- '../../src/x64/ic-x64.cc',
+ '../../src/x64/interface-descriptors-x64.cc',
'../../src/x64/lithium-codegen-x64.cc',
'../../src/x64/lithium-codegen-x64.h',
'../../src/x64/lithium-gap-resolver-x64.cc',
@@ -1000,11 +1104,15 @@
'../../src/x64/macro-assembler-x64.h',
'../../src/x64/regexp-macro-assembler-x64.cc',
'../../src/x64/regexp-macro-assembler-x64.h',
- '../../src/x64/stub-cache-x64.cc',
'../../src/compiler/x64/code-generator-x64.cc',
'../../src/compiler/x64/instruction-codes-x64.h',
'../../src/compiler/x64/instruction-selector-x64.cc',
'../../src/compiler/x64/linkage-x64.cc',
+ '../../src/ic/x64/access-compiler-x64.cc',
+ '../../src/ic/x64/handler-compiler-x64.cc',
+ '../../src/ic/x64/ic-x64.cc',
+ '../../src/ic/x64/ic-compiler-x64.cc',
+ '../../src/ic/x64/stub-cache-x64.cc',
],
}],
['OS=="linux"', {
@@ -1085,9 +1193,15 @@
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',
'../../src/base/atomicops_internals_x86_msvc.h',
+ '../../src/base/bits.cc',
+ '../../src/base/bits.h',
'../../src/base/build_config.h',
+ '../../src/base/compiler-specific.h',
'../../src/base/cpu.cc',
'../../src/base/cpu.h',
+ '../../src/base/division-by-constant.cc',
+ '../../src/base/division-by-constant.h',
+ '../../src/base/flags.h',
'../../src/base/lazy-instance.h',
'../../src/base/logging.cc',
'../../src/base/logging.h',
@@ -1108,6 +1222,8 @@
'../../src/base/safe_conversions_impl.h',
'../../src/base/safe_math.h',
'../../src/base/safe_math_impl.h',
+ '../../src/base/sys-info.cc',
+ '../../src/base/sys-info.h',
'../../src/base/utils/random-number-generator.cc',
'../../src/base/utils/random-number-generator.h',
],
@@ -1344,7 +1460,13 @@
'type': 'none',
'conditions': [
[ 'v8_use_external_startup_data==1', {
- 'dependencies': ['js2c'],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'dependencies': ['js2c#host'],
+ }, {
+ 'dependencies': ['js2c'],
+ }],
+ ],
'actions': [{
'action_name': 'concatenate_natives_blob',
'inputs': [
@@ -1352,14 +1474,38 @@
'<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
],
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob_host.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+ ],
+ }],
],
- 'action': ['python', '<@(_inputs)', '<@(_outputs)'],
}],
}],
['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
+ 'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
@@ -1396,24 +1542,25 @@
'../../src/uri.js',
'../../third_party/fdlibm/fdlibm.js',
'../../src/math.js',
- '../../src/messages.js',
'../../src/apinatives.js',
- '../../src/debug-debugger.js',
- '../../src/mirror-debugger.js',
- '../../src/liveedit-debugger.js',
'../../src/date.js',
- '../../src/json.js',
'../../src/regexp.js',
'../../src/arraybuffer.js',
'../../src/typedarray.js',
- '../../src/weak_collection.js',
- '../../src/promise.js',
+ '../../src/generator.js',
'../../src/object-observe.js',
'../../src/collection.js',
+ '../../src/weak-collection.js',
'../../src/collection-iterator.js',
- '../../src/macros.py',
+ '../../src/promise.js',
+ '../../src/messages.js',
+ '../../src/json.js',
'../../src/array-iterator.js',
- '../../src/string-iterator.js'
+ '../../src/string-iterator.js',
+ '../../src/debug-debugger.js',
+ '../../src/mirror-debugger.js',
+ '../../src/liveedit-debugger.js',
+ '../../src/macros.py',
],
'experimental_library_files': [
'../../src/macros.py',
@@ -1421,6 +1568,7 @@
'../../src/generator.js',
'../../src/harmony-string.js',
'../../src/harmony-array.js',
+ '../../src/harmony-classes.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
diff --git a/deps/v8/tools/lexer-shell.cc b/deps/v8/tools/lexer-shell.cc
index cbd3524cb3..5e8f531883 100644
--- a/deps/v8/tools/lexer-shell.cc
+++ b/deps/v8/tools/lexer-shell.cc
@@ -37,7 +37,7 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/messages.h"
-#include "src/runtime.h"
+#include "src/runtime/runtime.h"
#include "src/scanner-character-streams.h"
#include "src/scopeinfo.h"
#include "tools/shell-utils.h"
@@ -180,10 +180,11 @@ v8::base::TimeDelta ProcessFile(
int main(int argc, char* argv[]) {
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
- v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::Initialize();
Encoding encoding = LATIN1;
bool print_tokens = false;
std::vector<std::string> fnames;
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index a774449876..2cafc838e1 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -45,7 +45,7 @@
using namespace v8::internal;
-class StringResource8 : public v8::String::ExternalAsciiStringResource {
+class StringResource8 : public v8::String::ExternalOneByteStringResource {
public:
StringResource8(const char* data, int length)
: data_(data), length_(length) { }
@@ -123,10 +123,11 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
int main(int argc, char* argv[]) {
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
- v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::Initialize();
Encoding encoding = LATIN1;
std::vector<std::string> fnames;
std::string benchmark;
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 4b22444f9e..8a6ff2af13 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -236,10 +236,7 @@ class CppLintProcessor(SourceFileProcessor):
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
- return ['src', 'include', 'samples',
- join('test', 'base-unittests'),
- join('test', 'cctest'),
- join('test', 'compiler-unittests')]
+ return ['src', 'include', 'samples', join('test', 'cctest')]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
@@ -420,10 +417,9 @@ class SourceProcessor(SourceFileProcessor):
return success
-def CheckGeneratedRuntimeTests(workspace):
+def CheckRuntimeVsNativesNameClashes(workspace):
code = subprocess.call(
- [sys.executable, join(workspace, "tools", "generate-runtime-tests.py"),
- "check"])
+ [sys.executable, join(workspace, "tools", "check-name-clashes.py")])
return code == 0
@@ -451,7 +447,7 @@ def Main():
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success = SourceProcessor().Run(workspace) and success
- success = CheckGeneratedRuntimeTests(workspace) and success
+ success = CheckRuntimeVsNativesNameClashes(workspace) and success
success = CheckExternalReferenceRegistration(workspace) and success
if success:
return 0
diff --git a/deps/v8/tools/push-to-trunk/auto_push.py b/deps/v8/tools/push-to-trunk/auto_push.py
index aeaea805b1..fef3b5367b 100755
--- a/deps/v8/tools/push-to-trunk/auto_push.py
+++ b/deps/v8/tools/push-to-trunk/auto_push.py
@@ -36,22 +36,13 @@ import urllib
from common_includes import *
import push_to_trunk
-SETTINGS_LOCATION = "SETTINGS_LOCATION"
-
-CONFIG = {
- PERSISTFILE_BASENAME: "/tmp/v8-auto-push-tempfile",
- DOT_GIT_LOCATION: ".git",
- SETTINGS_LOCATION: "~/.auto-roll",
-}
-
PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
-
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- self.InitialEnvironmentChecks()
+ self.InitialEnvironmentChecks(self.default_cwd)
self.CommonPrepare()
@@ -59,7 +50,7 @@ class CheckAutoPushSettings(Step):
MESSAGE = "Checking settings file."
def RunStep(self):
- settings_file = os.path.realpath(self.Config(SETTINGS_LOCATION))
+ settings_file = os.path.realpath(self.Config("SETTINGS_LOCATION"))
if os.path.exists(settings_file):
settings_dict = json.loads(FileToText(settings_file))
if settings_dict.get("enable_auto_roll") is False:
@@ -119,9 +110,8 @@ class PushToTrunk(Step):
# TODO(machenbach): Update the script before calling it.
if self._options.push:
- P = push_to_trunk.PushToTrunk
self._side_effect_handler.Call(
- P(push_to_trunk.CONFIG, self._side_effect_handler).Run,
+ push_to_trunk.PushToTrunk().Run,
["--author", self._options.author,
"--reviewer", self._options.reviewer,
"--revision", self["lkgr"],
@@ -141,6 +131,12 @@ class AutoPush(ScriptsBase):
options.requires_editor = False
return True
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
+ "SETTINGS_LOCATION": "~/.auto-roll",
+ }
+
def _Steps(self):
return [
Preparation,
@@ -153,4 +149,4 @@ class AutoPush(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(AutoPush(CONFIG).Run())
+ sys.exit(AutoPush().Run())
diff --git a/deps/v8/tools/push-to-trunk/auto_roll.py b/deps/v8/tools/push-to-trunk/auto_roll.py
index 6e6c7fe2ab..2cca070f9f 100755
--- a/deps/v8/tools/push-to-trunk/auto_roll.py
+++ b/deps/v8/tools/push-to-trunk/auto_roll.py
@@ -12,14 +12,6 @@ import urllib
from common_includes import *
import chromium_roll
-CLUSTERFUZZ_API_KEY_FILE = "CLUSTERFUZZ_API_KEY_FILE"
-
-CONFIG = {
- PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
- CLUSTERFUZZ_API_KEY_FILE: ".cf_api_key",
-}
-
-CR_DEPS_URL = 'http://src.chromium.org/svn/trunk/src/DEPS'
class CheckActiveRoll(Step):
MESSAGE = "Check active roll."
@@ -50,8 +42,9 @@ class DetectLastPush(Step):
MESSAGE = "Detect commit ID of the last push to trunk."
def RunStep(self):
- push_hash = self.FindLastTrunkPush(include_patches=True)
- self["last_push"] = self.GitSVNFindSVNRev(push_hash)
+ push_hash = self.FindLastTrunkPush(
+ branch="origin/candidates", include_patches=True)
+ self["last_push"] = self.GetCommitPositionNumber(push_hash)
class DetectLastRoll(Step):
@@ -59,10 +52,14 @@ class DetectLastRoll(Step):
def RunStep(self):
# Interpret the DEPS file to retrieve the v8 revision.
+ # TODO(machenbach): This should be part or the roll-deps api of
+ # depot_tools.
Var = lambda var: '%s'
- exec(self.ReadURL(CR_DEPS_URL))
- last_roll = vars['v8_revision']
- if last_roll >= self["last_push"]:
+ exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
+ last_roll = self.GetCommitPositionNumber(vars['v8_revision'])
+ # FIXME(machenbach): When rolling from bleeding edge and from trunk there
+ # be different commit numbers here. Better use version?
+ if int(last_roll) >= int(self["last_push"]):
print("There is no newer v8 revision than the one in Chromium (%s)."
% last_roll)
return True
@@ -72,10 +69,10 @@ class CheckClusterFuzz(Step):
MESSAGE = "Check ClusterFuzz api for new problems."
def RunStep(self):
- if not os.path.exists(self.Config(CLUSTERFUZZ_API_KEY_FILE)):
+ if not os.path.exists(self.Config("CLUSTERFUZZ_API_KEY_FILE")):
print "Skipping ClusterFuzz check. No api key file found."
return False
- api_key = FileToText(self.Config(CLUSTERFUZZ_API_KEY_FILE))
+ api_key = FileToText(self.Config("CLUSTERFUZZ_API_KEY_FILE"))
# Check for open, reproducible issues that have no associated bug.
result = self._side_effect_handler.ReadClusterFuzzAPI(
api_key, job_type="linux_asan_d8_dbg", reproducible="True",
@@ -95,16 +92,14 @@ class RollChromium(Step):
"--author", self._options.author,
"--reviewer", self._options.reviewer,
"--chromium", self._options.chromium,
- "--force",
"--use-commit-queue",
]
if self._options.sheriff:
args.extend([
"--sheriff", "--googlers-mapping", self._options.googlers_mapping])
- R = chromium_roll.ChromiumRoll
- self._side_effect_handler.Call(
- R(chromium_roll.CONFIG, self._side_effect_handler).Run,
- args)
+ if self._options.dry_run:
+ args.extend(["--dry-run"])
+ self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
class AutoRoll(ScriptsBase):
@@ -112,8 +107,7 @@ class AutoRoll(ScriptsBase):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
- parser.add_argument("--roll",
- help="Make Chromium roll. Dry run if unspecified.",
+ parser.add_argument("--roll", help="Call Chromium roll script.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
@@ -125,6 +119,12 @@ class AutoRoll(ScriptsBase):
return False
return True
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
+ "CLUSTERFUZZ_API_KEY_FILE": ".cf_api_key",
+ }
+
def _Steps(self):
return [
CheckActiveRoll,
@@ -136,4 +136,4 @@ class AutoRoll(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(AutoRoll(CONFIG).Run())
+ sys.exit(AutoRoll().Run())
diff --git a/deps/v8/tools/push-to-trunk/auto_tag.py b/deps/v8/tools/push-to-trunk/auto_tag.py
index 6beaaff8a3..7b82e83890 100755
--- a/deps/v8/tools/push-to-trunk/auto_tag.py
+++ b/deps/v8/tools/push-to-trunk/auto_tag.py
@@ -8,36 +8,28 @@ import sys
from common_includes import *
-CONFIG = {
- BRANCHNAME: "auto-tag-v8",
- PERSISTFILE_BASENAME: "/tmp/v8-auto-tag-tempfile",
- DOT_GIT_LOCATION: ".git",
- VERSION_FILE: "src/version.cc",
-}
-
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
+ # TODO(machenbach): Remove after the git switch.
+ if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
+ print "This script is disabled until after the v8 git migration."
+ return True
+
self.CommonPrepare()
self.PrepareBranch()
self.GitCheckout("master")
- self.GitSVNRebase()
+ self.vc.Pull()
class GetTags(Step):
MESSAGE = "Get all V8 tags."
def RunStep(self):
- self.GitCreateBranch(self._config[BRANCHNAME])
-
- # Get remote tags.
- tags = filter(lambda s: re.match(r"^svn/tags/[\d+\.]+$", s),
- self.GitRemotes())
-
- # Remove 'svn/tags/' prefix.
- self["tags"] = map(lambda s: s[9:], tags)
+ self.GitCreateBranch(self._config["BRANCHNAME"])
+ self["tags"] = self.vc.GetTags()
class GetOldestUntaggedVersion(Step):
@@ -55,7 +47,7 @@ class GetOldestUntaggedVersion(Step):
format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines():
# Get the version.
- if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash):
+ if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
continue
self.ReadAndPersistVersion()
@@ -66,7 +58,7 @@ class GetOldestUntaggedVersion(Step):
version = version[:-2]
# Clean up checked-out version file.
- self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD")
+ self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
if version in tags:
if self["candidate"]:
@@ -121,9 +113,9 @@ class CalculateTagRevision(Step):
def RunStep(self):
# Get the lkgr after the tag candidate and before the next tag candidate.
- candidate_svn = self.GitSVNFindSVNRev(self["candidate"])
+ candidate_svn = self.vc.GitSvn(self["candidate"])
if self["next"]:
- next_svn = self.GitSVNFindSVNRev(self["next"])
+ next_svn = self.vc.GitSvn(self["next"])
else:
# Don't include the version change commit itself if there is no upper
# limit yet.
@@ -137,7 +129,7 @@ class CalculateTagRevision(Step):
return True
# Let's check if the lkgr is at least three hours old.
- self["lkgr"] = self.GitSVNFindGitHash(lkgr_svn)
+ self["lkgr"] = self.vc.SvnGit(lkgr_svn)
if not self["lkgr"]:
print "Couldn't find git hash for lkgr %s" % lkgr_svn
self.CommonCleanup()
@@ -160,7 +152,7 @@ class MakeTag(Step):
def RunStep(self):
if not self._options.dry_run:
self.GitReset(self["lkgr"])
- self.GitSVNTag(self["candidate_version"])
+ self.vc.Tag(self["candidate_version"])
class CleanUp(Step):
@@ -184,6 +176,12 @@ class AutoTag(ScriptsBase):
options.force_upload = True
return True
+ def _Config(self):
+ return {
+ "BRANCHNAME": "auto-tag-v8",
+ "PERSISTFILE_BASENAME": "/tmp/v8-auto-tag-tempfile",
+ }
+
def _Steps(self):
return [
Preparation,
@@ -197,4 +195,4 @@ class AutoTag(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(AutoTag(CONFIG).Run())
+ sys.exit(AutoTag().Run())
diff --git a/deps/v8/tools/push-to-trunk/bump_up_version.py b/deps/v8/tools/push-to-trunk/bump_up_version.py
index af5f73a600..4a10b86615 100755
--- a/deps/v8/tools/push-to-trunk/bump_up_version.py
+++ b/deps/v8/tools/push-to-trunk/bump_up_version.py
@@ -25,25 +25,26 @@ import sys
from common_includes import *
-CONFIG = {
- PERSISTFILE_BASENAME: "/tmp/v8-bump-up-version-tempfile",
- VERSION_FILE: "src/version.cc",
-}
-
VERSION_BRANCH = "auto-bump-up-version"
+# TODO(machenbach): Add vc interface that works on git mirror.
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
+ # TODO(machenbach): Remove after the git switch.
+ if(self.Config("PERSISTFILE_BASENAME") ==
+ "/tmp/v8-bump-up-version-tempfile"):
+ print "This script is disabled until after the v8 git migration."
+ return True
+
# Check for a clean workdir.
if not self.GitIsWorkdirClean(): # pragma: no cover
# This is in case a developer runs this script on a dirty tree.
self.GitStash()
- # TODO(machenbach): This should be called master after the git switch.
- self.GitCheckout("bleeding_edge")
+ self.GitCheckout("master")
self.GitPull()
@@ -55,8 +56,7 @@ class GetCurrentBleedingEdgeVersion(Step):
MESSAGE = "Get latest bleeding edge version."
def RunStep(self):
- # TODO(machenbach): This should be called master after the git switch.
- self.GitCheckout("bleeding_edge")
+ self.GitCheckout("master")
# Store latest version and revision.
self.ReadAndPersistVersion()
@@ -72,7 +72,7 @@ class LastChangeBailout(Step):
MESSAGE = "Stop script if the last change modified the version."
def RunStep(self):
- if self._config[VERSION_FILE] in self.GitChangedFiles(self["latest"]):
+ if VERSION_FILE in self.GitChangedFiles(self["latest"]):
print "Stop due to recent version change."
return True
@@ -93,7 +93,7 @@ class GetLKGRVersion(Step):
MESSAGE = "Get bleeding edge lkgr version."
def RunStep(self):
- self.GitCheckout("bleeding_edge")
+ self.GitCheckout("master")
# If the commit was made from svn, there is a mapping entry in the commit
# message.
self["lkgr"] = self.GitLog(
@@ -111,7 +111,7 @@ class GetLKGRVersion(Step):
print "LKGR version: %s" % self["lkgr_version"]
# Ensure a clean version branch.
- self.GitCheckout("bleeding_edge")
+ self.GitCheckout("master")
self.DeleteBranch(VERSION_BRANCH)
@@ -121,7 +121,7 @@ class LKGRVersionUpToDateBailout(Step):
def RunStep(self):
# If a version-change commit becomes the lkgr, don't bump up the version
# again.
- if self._config[VERSION_FILE] in self.GitChangedFiles(self["lkgr"]):
+ if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
print "Stop because the lkgr is a version change itself."
return True
@@ -136,8 +136,7 @@ class GetTrunkVersion(Step):
MESSAGE = "Get latest trunk version."
def RunStep(self):
- # TODO(machenbach): This should be called trunk after the git switch.
- self.GitCheckout("master")
+ self.GitCheckout("candidates")
self.GitPull()
self.ReadAndPersistVersion("trunk_")
self["trunk_version"] = self.ArrayToVersion("trunk_")
@@ -191,21 +190,25 @@ class ChangeVersion(Step):
MESSAGE = "Bump up the version."
def RunStep(self):
- self.GitCreateBranch(VERSION_BRANCH, "bleeding_edge")
+ self.GitCreateBranch(VERSION_BRANCH, "master")
- self.SetVersion(self.Config(VERSION_FILE), "new_")
+ self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
try:
- self.GitCommit("[Auto-roll] Bump up version to %s\n\nTBR=%s" %
- (self["new_version"], self._options.author))
- self.GitUpload(author=self._options.author,
- force=self._options.force_upload,
- bypass_hooks=True)
- self.GitDCommit()
+ msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
+ self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
+ author=self._options.author)
+ if self._options.svn:
+ self.SVNCommit("branches/bleeding_edge", msg)
+ else:
+ self.GitUpload(author=self._options.author,
+ force=self._options.force_upload,
+ bypass_hooks=True)
+ self.GitDCommit()
print "Successfully changed the version."
finally:
# Clean up.
- self.GitCheckout("bleeding_edge")
+ self.GitCheckout("master")
self.DeleteBranch(VERSION_BRANCH)
@@ -223,6 +226,12 @@ class BumpUpVersion(ScriptsBase):
options.force_upload = True
return True
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
+ "PATCH_FILE": "/tmp/v8-bump-up-version-tempfile-patch-file",
+ }
+
def _Steps(self):
return [
Preparation,
@@ -238,4 +247,4 @@ class BumpUpVersion(ScriptsBase):
]
if __name__ == "__main__": # pragma: no cover
- sys.exit(BumpUpVersion(CONFIG).Run())
+ sys.exit(BumpUpVersion().Run())
diff --git a/deps/v8/tools/push-to-trunk/chromium_roll.py b/deps/v8/tools/push-to-trunk/chromium_roll.py
index 0138ff8e72..ceedbc179e 100755
--- a/deps/v8/tools/push-to-trunk/chromium_roll.py
+++ b/deps/v8/tools/push-to-trunk/chromium_roll.py
@@ -9,21 +9,13 @@ import sys
from common_includes import *
-DEPS_FILE = "DEPS_FILE"
-CHROMIUM = "CHROMIUM"
-
-CONFIG = {
- PERSISTFILE_BASENAME: "/tmp/v8-chromium-roll-tempfile",
- DOT_GIT_LOCATION: ".git",
- DEPS_FILE: "DEPS",
-}
-
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- self.CommonPrepare()
+ # Update v8 remote tracking branches.
+ self.GitFetchOrigin()
class DetectLastPush(Step):
@@ -31,88 +23,79 @@ class DetectLastPush(Step):
def RunStep(self):
self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
- include_patches=True)
- self["trunk_revision"] = self.GitSVNFindSVNRev(self["last_push"])
+ branch="origin/candidates", include_patches=True)
+ self["trunk_revision"] = self.GetCommitPositionNumber(self["last_push"])
self["push_title"] = self.GitLog(n=1, format="%s",
git_hash=self["last_push"])
-class CheckChromium(Step):
- MESSAGE = "Ask for chromium checkout."
-
- def Run(self):
- self["chrome_path"] = self._options.chromium
- while not self["chrome_path"]:
- self.DieNoManualMode("Please specify the path to a Chromium checkout in "
- "forced mode.")
- print ("Please specify the path to the chromium \"src\" directory: "),
- self["chrome_path"] = self.ReadLine()
-
-
class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
- REQUIRES = "chrome_path"
def RunStep(self):
self["v8_path"] = os.getcwd()
- os.chdir(self["chrome_path"])
- self.InitialEnvironmentChecks()
+ cwd = self._options.chromium
+ os.chdir(cwd)
+ self.InitialEnvironmentChecks(cwd)
# Check for a clean workdir.
- if not self.GitIsWorkdirClean(): # pragma: no cover
+ if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Assert that the DEPS file is there.
- if not os.path.exists(self.Config(DEPS_FILE)): # pragma: no cover
+ if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
MESSAGE = "Update the checkout and create a new branch."
- REQUIRES = "chrome_path"
def RunStep(self):
- os.chdir(self["chrome_path"])
- self.GitCheckout("master")
- self.GitPull()
- self.GitCreateBranch("v8-roll-%s" % self["trunk_revision"])
+ self.GitCheckout("master", cwd=self._options.chromium)
+ self.Command("gclient", "sync --nohooks", cwd=self._options.chromium)
+ self.GitPull(cwd=self._options.chromium)
+
+ # Update v8 remotes.
+ self.GitFetchOrigin()
+
+ self.GitCreateBranch("v8-roll-%s" % self["trunk_revision"],
+ cwd=self._options.chromium)
class UploadCL(Step):
MESSAGE = "Create and upload CL."
- REQUIRES = "chrome_path"
def RunStep(self):
- os.chdir(self["chrome_path"])
-
# Patch DEPS file.
- deps = FileToText(self.Config(DEPS_FILE))
- deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
- self["trunk_revision"],
- deps)
- TextToFile(deps, self.Config(DEPS_FILE))
-
- if self._options.reviewer and not self._options.manual:
- print "Using account %s for review." % self._options.reviewer
- rev = self._options.reviewer
- else:
- print "Please enter the email address of a reviewer for the roll CL: ",
- self.DieNoManualMode("A reviewer must be specified in forced mode.")
- rev = self.ReadLine()
+ if self.Command(
+ "roll-dep", "v8 %s" % self["trunk_revision"],
+ cwd=self._options.chromium) is None:
+ self.Die("Failed to create deps for %s" % self["trunk_revision"])
commit_title = "Update V8 to %s." % self["push_title"].lower()
sheriff = ""
if self["sheriff"]:
sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems."
% self["sheriff"])
- self.GitCommit("%s%s\n\nTBR=%s" % (commit_title, sheriff, rev))
- self.GitUpload(author=self._options.author,
- force=self._options.force_upload,
- cq=self._options.use_commit_queue)
- print "CL uploaded."
+ self.GitCommit("%s%s\n\nTBR=%s" %
+ (commit_title, sheriff, self._options.reviewer),
+ author=self._options.author,
+ cwd=self._options.chromium)
+ if not self._options.dry_run:
+ self.GitUpload(author=self._options.author,
+ force=True,
+ cq=self._options.use_commit_queue,
+ cwd=self._options.chromium)
+ print "CL uploaded."
+ else:
+ self.GitCheckout("master", cwd=self._options.chromium)
+ self.GitDeleteBranch("v8-roll-%s" % self["trunk_revision"],
+ cwd=self._options.chromium)
+ print "Dry run - don't upload."
+# TODO(machenbach): Make this obsolete. We are only in the chromium chechout
+# for the initial .git check.
class SwitchV8(Step):
MESSAGE = "Returning to V8 checkout."
- REQUIRES = "chrome_path"
def RunStep(self):
os.chdir(self["v8_path"])
@@ -127,19 +110,12 @@ class CleanUp(Step):
% self["trunk_revision"])
# Clean up all temporary files.
- Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+ Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
class ChromiumRoll(ScriptsBase):
def _PrepareOptions(self, parser):
- group = parser.add_mutually_exclusive_group()
- group.add_argument("-f", "--force",
- help="Don't prompt the user.",
- default=False, action="store_true")
- group.add_argument("-m", "--manual",
- help="Prompt the user at every important step.",
- default=False, action="store_true")
- parser.add_argument("-c", "--chromium",
+ parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("-l", "--last-push",
@@ -149,24 +125,24 @@ class ChromiumRoll(ScriptsBase):
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
- if not options.manual and not options.reviewer:
- print "A reviewer (-r) is required in (semi-)automatic mode."
- return False
- if not options.manual and not options.chromium:
- print "A chromium checkout (-c) is required in (semi-)automatic mode."
- return False
- if not options.manual and not options.author:
- print "Specify your chromium.org email with -a in (semi-)automatic mode."
+ if not options.author or not options.reviewer:
+ print "A reviewer (-r) and an author (-a) are required."
return False
- options.tbr_commit = not options.manual
+ options.requires_editor = False
+ options.force = True
+ options.manual = False
return True
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
+ }
+
def _Steps(self):
return [
Preparation,
DetectLastPush,
- CheckChromium,
DetermineV8Sheriff,
SwitchChromium,
UpdateChromiumCheckout,
@@ -177,4 +153,4 @@ class ChromiumRoll(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(ChromiumRoll(CONFIG).Run())
+ sys.exit(ChromiumRoll().Run())
diff --git a/deps/v8/tools/push-to-trunk/common_includes.py b/deps/v8/tools/push-to-trunk/common_includes.py
index 0e57a25bb1..7ea39f73c9 100644
--- a/deps/v8/tools/push-to-trunk/common_includes.py
+++ b/deps/v8/tools/push-to-trunk/common_includes.py
@@ -29,10 +29,12 @@
import argparse
import datetime
import httplib
+import glob
import imp
import json
import os
import re
+import shutil
import subprocess
import sys
import textwrap
@@ -43,14 +45,11 @@ import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
-PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
-BRANCHNAME = "BRANCHNAME"
-DOT_GIT_LOCATION = "DOT_GIT_LOCATION"
-VERSION_FILE = "VERSION_FILE"
-CHANGELOG_FILE = "CHANGELOG_FILE"
-CHANGELOG_ENTRY_FILE = "CHANGELOG_ENTRY_FILE"
-COMMITMSG_FILE = "COMMITMSG_FILE"
-PATCH_FILE = "PATCH_FILE"
+VERSION_FILE = os.path.join("src", "version.cc")
+
+# V8 base directory.
+DEFAULT_CWD = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def TextToFile(text, file_name):
@@ -183,16 +182,18 @@ def SortingKey(version):
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
-def Command(cmd, args="", prefix="", pipe=True):
+def Command(cmd, args="", prefix="", pipe=True, cwd=None):
+ cwd = cwd or os.getcwd()
# TODO(machenbach): Use timeout.
cmd_line = "%s %s %s" % (prefix, cmd, args)
print "Command: %s" % cmd_line
+ print "in %s" % cwd
sys.stdout.flush()
try:
if pipe:
- return subprocess.check_output(cmd_line, shell=True)
+ return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
else:
- return subprocess.check_call(cmd_line, shell=True)
+ return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
except subprocess.CalledProcessError:
return None
finally:
@@ -205,8 +206,8 @@ class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
return fun(*args, **kwargs)
- def Command(self, cmd, args="", prefix="", pipe=True):
- return Command(cmd, args, prefix, pipe)
+ def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
+ return Command(cmd, args, prefix, pipe, cwd=cwd)
def ReadLine(self):
return sys.stdin.readline().strip()
@@ -254,15 +255,151 @@ class NoRetryException(Exception):
pass
+class VCInterface(object):
+ def InjectStep(self, step):
+ self.step=step
+
+ def Pull(self):
+ raise NotImplementedError()
+
+ def Fetch(self):
+ raise NotImplementedError()
+
+ def GetTags(self):
+ raise NotImplementedError()
+
+ def GetBranches(self):
+ raise NotImplementedError()
+
+ def GitSvn(self, hsh, branch=""):
+ raise NotImplementedError()
+
+ def SvnGit(self, rev, branch=""):
+ raise NotImplementedError()
+
+ def RemoteMasterBranch(self):
+ raise NotImplementedError()
+
+ def RemoteCandidateBranch(self):
+ raise NotImplementedError()
+
+ def RemoteBranch(self, name):
+ raise NotImplementedError()
+
+ def Land(self):
+ raise NotImplementedError()
+
+ def CLLand(self):
+ raise NotImplementedError()
+
+ # TODO(machenbach): There is some svn knowledge in this interface. In svn,
+ # tag and commit are different remote commands, while in git we would commit
+ # and tag locally and then push/land in one unique step.
+ def Tag(self, tag):
+ raise NotImplementedError()
+
+
+class GitSvnInterface(VCInterface):
+ def Pull(self):
+ self.step.GitSVNRebase()
+
+ def Fetch(self):
+ self.step.GitSVNFetch()
+
+ def GetTags(self):
+ # Get remote tags.
+ tags = filter(lambda s: re.match(r"^svn/tags/[\d+\.]+$", s),
+ self.step.GitRemotes())
+
+ # Remove 'svn/tags/' prefix.
+ return map(lambda s: s[9:], tags)
+
+ def GetBranches(self):
+ # Get relevant remote branches, e.g. "svn/3.25".
+ branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s),
+ self.step.GitRemotes())
+ # Remove 'svn/' prefix.
+ return map(lambda s: s[4:], branches)
+
+ def GitSvn(self, hsh, branch=""):
+ return self.step.GitSVNFindSVNRev(hsh, branch)
+
+ def SvnGit(self, rev, branch=""):
+ return self.step.GitSVNFindGitHash(rev, branch)
+
+ def RemoteMasterBranch(self):
+ return "svn/bleeding_edge"
+
+ def RemoteCandidateBranch(self):
+ return "svn/trunk"
+
+ def RemoteBranch(self, name):
+ return "svn/%s" % name
+
+ def Land(self):
+ self.step.GitSVNDCommit()
+
+ def CLLand(self):
+ self.step.GitDCommit()
+
+ def Tag(self, tag):
+ self.step.GitSVNTag(tag)
+
+
+class GitReadOnlyMixin(VCInterface):
+ def Pull(self):
+ self.step.GitPull()
+
+ def Fetch(self):
+ self.step.Git("fetch")
+
+ def GetTags(self):
+ return self.step.Git("tag").strip().splitlines()
+
+ def GetBranches(self):
+ # Get relevant remote branches, e.g. "origin/branch-heads/3.25".
+ branches = filter(
+ lambda s: re.match(r"^origin/branch\-heads/\d+\.\d+$", s),
+ self.step.GitRemotes())
+ # Remove 'origin/branch-heads/' prefix.
+ return map(lambda s: s[20:], branches)
+
+ def RemoteMasterBranch(self):
+ return "origin/master"
+
+ def RemoteCandidateBranch(self):
+ return "origin/candidates"
+
+ def RemoteBranch(self, name):
+ if name in ["candidates", "master"]:
+ return "origin/%s" % name
+ return "origin/branch-heads/%s" % name
+
+
+class GitReadSvnWriteInterface(GitReadOnlyMixin, GitSvnInterface):
+ pass
+
+
+VC_INTERFACES = {
+ "git_svn": GitSvnInterface,
+ "git_read_svn_write": GitReadSvnWriteInterface,
+}
+
+
class Step(GitRecipesMixin):
- def __init__(self, text, requires, number, config, state, options, handler):
+ def __init__(self, text, number, config, state, options, handler):
self._text = text
- self._requires = requires
self._number = number
self._config = config
self._state = state
self._options = options
self._side_effect_handler = handler
+ self.vc = VC_INTERFACES[options.vc_interface]()
+ self.vc.InjectStep(self)
+
+ # The testing configuration might set a different default cwd.
+ self.default_cwd = self._config.get("DEFAULT_CWD") or DEFAULT_CWD
+
assert self._number >= 0
assert self._config is not None
assert self._state is not None
@@ -283,14 +420,10 @@ class Step(GitRecipesMixin):
def Run(self):
# Restore state.
- state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+ state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
- # Skip step if requirement is not met.
- if self._requires and not self._state.get(self._requires):
- return
-
print ">>> Step %d: %s" % (self._number, self._text)
try:
return self.RunStep()
@@ -318,13 +451,14 @@ class Step(GitRecipesMixin):
got_exception = False
try:
result = cb()
- except NoRetryException, e:
+ except NoRetryException as e:
raise e
- except Exception:
- got_exception = True
+ except Exception as e:
+ got_exception = e
if got_exception or retry_on(result):
if not wait_plan: # pragma: no cover
- raise Exception("Retried too often. Giving up.")
+ raise Exception("Retried too often. Giving up. Reason: %s" %
+ str(got_exception))
wait_time = wait_plan.pop()
print "Waiting for %f seconds." % wait_time
self._side_effect_handler.Sleep(wait_time)
@@ -340,21 +474,31 @@ class Step(GitRecipesMixin):
else:
return self._side_effect_handler.ReadLine()
- def Git(self, args="", prefix="", pipe=True, retry_on=None):
- cmd = lambda: self._side_effect_handler.Command("git", args, prefix, pipe)
+ def Command(self, name, args, cwd=None):
+ cmd = lambda: self._side_effect_handler.Command(
+ name, args, "", True, cwd=cwd or self.default_cwd)
+ return self.Retry(cmd, None, [5])
+
+ def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
+ cmd = lambda: self._side_effect_handler.Command(
+ "git", args, prefix, pipe, cwd=cwd or self.default_cwd)
result = self.Retry(cmd, retry_on, [5, 30])
if result is None:
raise GitFailedException("'git %s' failed." % args)
return result
- def SVN(self, args="", prefix="", pipe=True, retry_on=None):
- cmd = lambda: self._side_effect_handler.Command("svn", args, prefix, pipe)
+ def SVN(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
+ cmd = lambda: self._side_effect_handler.Command(
+ "svn", args, prefix, pipe, cwd=cwd or self.default_cwd)
return self.Retry(cmd, retry_on, [5, 30])
def Editor(self, args):
if self._options.requires_editor:
- return self._side_effect_handler.Command(os.environ["EDITOR"], args,
- pipe=False)
+ return self._side_effect_handler.Command(
+ os.environ["EDITOR"],
+ args,
+ pipe=False,
+ cwd=self.default_cwd)
def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
wait_plan = wait_plan or [3, 60, 600]
@@ -391,14 +535,15 @@ class Step(GitRecipesMixin):
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
- def InitialEnvironmentChecks(self):
+ def InitialEnvironmentChecks(self, cwd):
# Cancel if this is not a git checkout.
- if not os.path.exists(self._config[DOT_GIT_LOCATION]): # pragma: no cover
+ if not os.path.exists(os.path.join(cwd, ".git")): # pragma: no cover
self.Die("This is not a git checkout, this script won't work for you.")
# Cancel if EDITOR is unset or not executable.
if (self._options.requires_editor and (not os.environ.get("EDITOR") or
- Command("which", os.environ["EDITOR"]) is None)): # pragma: no cover
+ self.Command(
+ "which", os.environ["EDITOR"]) is None)): # pragma: no cover
self.Die("Please set your EDITOR environment variable, you'll need it.")
def CommonPrepare(self):
@@ -410,19 +555,23 @@ class Step(GitRecipesMixin):
self["current_branch"] = self.GitCurrentBranch()
# Fetch unfetched revisions.
- self.GitSVNFetch()
+ self.vc.Fetch()
def PrepareBranch(self):
# Delete the branch that will be created later if it exists already.
- self.DeleteBranch(self._config[BRANCHNAME])
+ self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
self.GitCheckout(self["current_branch"])
- if self._config[BRANCHNAME] != self["current_branch"]:
- self.GitDeleteBranch(self._config[BRANCHNAME])
+ if self._config["BRANCHNAME"] != self["current_branch"]:
+ self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
- Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+ for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
+ if os.path.isfile(f):
+ os.remove(f)
+ if os.path.isdir(f):
+ shutil.rmtree(f)
def ReadAndPersistVersion(self, prefix=""):
def ReadAndPersist(var_name, def_name):
@@ -430,7 +579,7 @@ class Step(GitRecipesMixin):
if match:
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
- for line in LinesInFile(self._config[VERSION_FILE]):
+ for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
for (var_name, def_name) in [("major", "MAJOR_VERSION"),
("minor", "MINOR_VERSION"),
("build", "BUILD_NUMBER"),
@@ -470,13 +619,14 @@ class Step(GitRecipesMixin):
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
- def FindLastTrunkPush(self, parent_hash="", include_patches=False):
+ def FindLastTrunkPush(
+ self, parent_hash="", branch="", include_patches=False):
push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*"
if not include_patches:
# Non-patched versions only have three numbers followed by the "(based
# on...) comment."
push_pattern += " (based"
- branch = "" if parent_hash else "svn/trunk"
+ branch = "" if parent_hash else branch or self.vc.RemoteCandidateBranch()
return self.GitLog(n=1, format="%H", grep=push_pattern,
parent_hash=parent_hash, branch=branch)
@@ -500,6 +650,23 @@ class Step(GitRecipesMixin):
output += "%s\n" % line
TextToFile(output, version_file)
+ def SVNCommit(self, root, commit_message):
+ patch = self.GitDiff("HEAD^", "HEAD")
+ TextToFile(patch, self._config["PATCH_FILE"])
+ self.Command("svn", "update", cwd=self._options.svn)
+ if self.Command("svn", "status", cwd=self._options.svn) != "":
+ self.Die("SVN checkout not clean.")
+ if not self.Command("patch", "-d %s -p1 -i %s" %
+ (root, self._config["PATCH_FILE"]),
+ cwd=self._options.svn):
+ self.Die("Could not apply patch.")
+ self.Command(
+ "svn",
+ "commit --non-interactive --username=%s --config-dir=%s -m \"%s\"" %
+ (self._options.author, self._options.svn_config, commit_message),
+ cwd=self._options.svn)
+
+
class UploadStep(Step):
MESSAGE = "Upload for code review."
@@ -511,7 +678,9 @@ class UploadStep(Step):
print "Please enter the email address of a V8 reviewer for your patch: ",
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
- self.GitUpload(reviewer, self._options.author, self._options.force_upload)
+ self.GitUpload(reviewer, self._options.author, self._options.force_upload,
+ bypass_hooks=self._options.bypass_upload_hooks,
+ cc=self._options.cc)
class DetermineV8Sheriff(Step):
@@ -558,21 +727,18 @@ def MakeStep(step_class=Step, number=0, state=None, config=None,
message = step_class.MESSAGE
except AttributeError:
message = step_class.__name__
- try:
- requires = step_class.REQUIRES
- except AttributeError:
- requires = None
- return step_class(message, requires, number=number, config=config,
+ return step_class(message, number=number, config=config,
state=state, options=options,
handler=side_effect_handler)
class ScriptsBase(object):
- # TODO(machenbach): Move static config here.
- def __init__(self, config, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
+ def __init__(self,
+ config=None,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
state=None):
- self._config = config
+ self._config = config or self._Config()
self._side_effect_handler = side_effect_handler
self._state = state if state is not None else {}
@@ -588,10 +754,15 @@ class ScriptsBase(object):
def _Steps(self): # pragma: no cover
raise Exception("Not implemented.")
+ def _Config(self):
+ return {}
+
def MakeOptions(self, args=None):
parser = argparse.ArgumentParser(description=self._Description())
parser.add_argument("-a", "--author", default="",
help="The author email used for rietveld.")
+ parser.add_argument("--dry-run", default=False, action="store_true",
+ help="Perform only read-only actions.")
parser.add_argument("-g", "--googlers-mapping",
help="Path to the script mapping google accounts.")
parser.add_argument("-r", "--reviewer", default="",
@@ -600,10 +771,17 @@ class ScriptsBase(object):
help=("Determine current sheriff to review CLs. On "
"success, this will overwrite the reviewer "
"option."))
+ parser.add_argument("--svn",
+ help=("Optional full svn checkout for the commit."
+ "The folder needs to be the svn root."))
+ parser.add_argument("--svn-config",
+ help=("Optional folder used as svn --config-dir."))
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
-
+ parser.add_argument("--vc-interface",
+ help=("Choose VC interface out of git_svn|"
+ "git_read_svn_write."))
self._PrepareOptions(parser)
if args is None: # pragma: no cover
@@ -620,10 +798,15 @@ class ScriptsBase(object):
print "To determine the current sheriff, requires the googler mapping"
parser.print_help()
return None
+ if options.svn and not options.svn_config:
+ print "Using pure svn for committing requires also --svn-config"
+ parser.print_help()
+ return None
# Defaults for options, common to all scripts.
options.manual = getattr(options, "manual", True)
options.force = getattr(options, "force", False)
+ options.bypass_upload_hooks = False
# Derived options.
options.requires_editor = not options.force
@@ -635,6 +818,9 @@ class ScriptsBase(object):
if not self._ProcessOptions(options):
parser.print_help()
return None
+
+ if not options.vc_interface:
+ options.vc_interface = "git_svn"
return options
def RunSteps(self, step_classes, args=None):
@@ -642,7 +828,7 @@ class ScriptsBase(object):
if not options:
return 1
- state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+ state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if options.step == 0 and os.path.exists(state_file):
os.remove(state_file)
@@ -652,7 +838,7 @@ class ScriptsBase(object):
options, self._side_effect_handler))
for step in steps[options.step:]:
if step.Run():
- return 1
+ return 0
return 0
def Run(self, args=None):
diff --git a/deps/v8/tools/push-to-trunk/git_recipes.py b/deps/v8/tools/push-to-trunk/git_recipes.py
index 6ffb2da834..a1e6256df2 100644
--- a/deps/v8/tools/push-to-trunk/git_recipes.py
+++ b/deps/v8/tools/push-to-trunk/git_recipes.py
@@ -28,6 +28,51 @@
import re
+SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
+ROLL_DEPS_GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
+
+# Regular expression that matches a single commit footer line.
+COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s+(.+)')
+
+# Footer metadata key for commit position.
+COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
+
+# Regular expression to parse a commit position
+COMMIT_POSITION_RE = re.compile(r'(.+)@\{#(\d+)\}')
+
+# Key for the 'git-svn' ID metadata commit footer entry.
+GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'
+
+# e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
+# ce2b1a6d-e550-0410-aec6-3dcde31c8c00
+GIT_SVN_ID_RE = re.compile(r'((?:\w+)://[^@]+)@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
+
+
+# Copied from bot_update.py.
+def GetCommitMessageFooterMap(message):
+ """Returns: (dict) A dictionary of commit message footer entries.
+ """
+ footers = {}
+
+ # Extract the lines in the footer block.
+ lines = []
+ for line in message.strip().splitlines():
+ line = line.strip()
+ if len(line) == 0:
+ del(lines[:])
+ continue
+ lines.append(line)
+
+ # Parse the footer
+ for line in lines:
+ m = COMMIT_FOOTER_ENTRY_RE.match(line)
+ if not m:
+ # If any single line isn't valid, the entire footer is invalid.
+ footers.clear()
+ return footers
+ footers[m.group(1)] = m.group(2).strip()
+ return footers
+
class GitFailedException(Exception):
pass
@@ -49,54 +94,55 @@ def Quoted(s):
class GitRecipesMixin(object):
- def GitIsWorkdirClean(self):
- return self.Git("status -s -uno").strip() == ""
+ def GitIsWorkdirClean(self, **kwargs):
+ return self.Git("status -s -uno", **kwargs).strip() == ""
@Strip
- def GitBranch(self):
- return self.Git("branch")
+ def GitBranch(self, **kwargs):
+ return self.Git("branch", **kwargs)
- def GitCreateBranch(self, name, branch=""):
+ def GitCreateBranch(self, name, branch="", **kwargs):
assert name
- self.Git(MakeArgs(["checkout -b", name, branch]))
+ self.Git(MakeArgs(["checkout -b", name, branch]), **kwargs)
- def GitDeleteBranch(self, name):
+ def GitDeleteBranch(self, name, **kwargs):
assert name
- self.Git(MakeArgs(["branch -D", name]))
+ self.Git(MakeArgs(["branch -D", name]), **kwargs)
- def GitReset(self, name):
+ def GitReset(self, name, **kwargs):
assert name
- self.Git(MakeArgs(["reset --hard", name]))
+ self.Git(MakeArgs(["reset --hard", name]), **kwargs)
- def GitStash(self):
- self.Git(MakeArgs(["stash"]))
+ def GitStash(self, **kwargs):
+ self.Git(MakeArgs(["stash"]), **kwargs)
- def GitRemotes(self):
- return map(str.strip, self.Git(MakeArgs(["branch -r"])).splitlines())
+ def GitRemotes(self, **kwargs):
+ return map(str.strip,
+ self.Git(MakeArgs(["branch -r"]), **kwargs).splitlines())
- def GitCheckout(self, name):
+ def GitCheckout(self, name, **kwargs):
assert name
- self.Git(MakeArgs(["checkout -f", name]))
+ self.Git(MakeArgs(["checkout -f", name]), **kwargs)
- def GitCheckoutFile(self, name, branch_or_hash):
+ def GitCheckoutFile(self, name, branch_or_hash, **kwargs):
assert name
assert branch_or_hash
- self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]))
+ self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]), **kwargs)
- def GitCheckoutFileSafe(self, name, branch_or_hash):
+ def GitCheckoutFileSafe(self, name, branch_or_hash, **kwargs):
try:
- self.GitCheckoutFile(name, branch_or_hash)
+ self.GitCheckoutFile(name, branch_or_hash, **kwargs)
except GitFailedException: # pragma: no cover
# The file doesn't exist in that revision.
return False
return True
- def GitChangedFiles(self, git_hash):
+ def GitChangedFiles(self, git_hash, **kwargs):
assert git_hash
try:
files = self.Git(MakeArgs(["diff --name-only",
git_hash,
- "%s^" % git_hash]))
+ "%s^" % git_hash]), **kwargs)
return map(str.strip, files.splitlines())
except GitFailedException: # pragma: no cover
# Git fails using "^" at branch roots.
@@ -104,15 +150,15 @@ class GitRecipesMixin(object):
@Strip
- def GitCurrentBranch(self):
- for line in self.Git("status -s -b -uno").strip().splitlines():
+ def GitCurrentBranch(self, **kwargs):
+ for line in self.Git("status -s -b -uno", **kwargs).strip().splitlines():
match = re.match(r"^## (.+)", line)
if match: return match.group(1)
raise Exception("Couldn't find curent branch.") # pragma: no cover
@Strip
def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
- branch="", reverse=False):
+ branch="", reverse=False, **kwargs):
assert not (git_hash and parent_hash)
args = ["log"]
if n > 0:
@@ -128,27 +174,27 @@ class GitRecipesMixin(object):
if parent_hash:
args.append("%s^" % parent_hash)
args.append(branch)
- return self.Git(MakeArgs(args))
+ return self.Git(MakeArgs(args), **kwargs)
- def GitGetPatch(self, git_hash):
+ def GitGetPatch(self, git_hash, **kwargs):
assert git_hash
- return self.Git(MakeArgs(["log", "-1", "-p", git_hash]))
+ return self.Git(MakeArgs(["log", "-1", "-p", git_hash]), **kwargs)
# TODO(machenbach): Unused? Remove.
- def GitAdd(self, name):
+ def GitAdd(self, name, **kwargs):
assert name
- self.Git(MakeArgs(["add", Quoted(name)]))
+ self.Git(MakeArgs(["add", Quoted(name)]), **kwargs)
- def GitApplyPatch(self, patch_file, reverse=False):
+ def GitApplyPatch(self, patch_file, reverse=False, **kwargs):
assert patch_file
args = ["apply --index --reject"]
if reverse:
args.append("--reverse")
args.append(Quoted(patch_file))
- self.Git(MakeArgs(args))
+ self.Git(MakeArgs(args), **kwargs)
def GitUpload(self, reviewer="", author="", force=False, cq=False,
- bypass_hooks=False):
+ bypass_hooks=False, cc="", **kwargs):
args = ["cl upload --send-mail"]
if author:
args += ["--email", Quoted(author)]
@@ -160,54 +206,109 @@ class GitRecipesMixin(object):
args.append("--use-commit-queue")
if bypass_hooks:
args.append("--bypass-hooks")
+ if cc:
+ args += ["--cc", Quoted(cc)]
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
- self.Git(MakeArgs(args), pipe=False)
+ self.Git(MakeArgs(args), pipe=False, **kwargs)
- def GitCommit(self, message="", file_name=""):
+ def GitCommit(self, message="", file_name="", author=None, **kwargs):
assert message or file_name
args = ["commit"]
if file_name:
args += ["-aF", Quoted(file_name)]
if message:
args += ["-am", Quoted(message)]
- self.Git(MakeArgs(args))
+ if author:
+ args += ["--author", "\"%s <%s>\"" % (author, author)]
+ self.Git(MakeArgs(args), **kwargs)
- def GitPresubmit(self):
- self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"")
+ def GitPresubmit(self, **kwargs):
+ self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"", **kwargs)
- def GitDCommit(self):
- self.Git("cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None)
+ def GitDCommit(self, **kwargs):
+ self.Git(
+ "cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
- def GitDiff(self, loc1, loc2):
- return self.Git(MakeArgs(["diff", loc1, loc2]))
+ def GitDiff(self, loc1, loc2, **kwargs):
+ return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs)
- def GitPull(self):
- self.Git("pull")
+ def GitPull(self, **kwargs):
+ self.Git("pull", **kwargs)
- def GitSVNFetch(self):
- self.Git("svn fetch")
+ def GitFetchOrigin(self, **kwargs):
+ self.Git("fetch origin", **kwargs)
- def GitSVNRebase(self):
- self.Git("svn rebase")
+ def GitConvertToSVNRevision(self, git_hash, **kwargs):
+ result = self.Git(MakeArgs(["rev-list", "-n", "1", git_hash]), **kwargs)
+ if not result or not SHA1_RE.match(result):
+ raise GitFailedException("Git hash %s is unknown." % git_hash)
+ log = self.GitLog(n=1, format="%B", git_hash=git_hash, **kwargs)
+ for line in reversed(log.splitlines()):
+ match = ROLL_DEPS_GIT_SVN_ID_RE.match(line.strip())
+ if match:
+ return match.group(1)
+ raise GitFailedException("Couldn't convert %s to SVN." % git_hash)
+
+ @Strip
+ # Copied from bot_update.py and modified for svn-like numbers only.
+ def GetCommitPositionNumber(self, git_hash, **kwargs):
+ """Dumps the 'git' log for a specific revision and parses out the commit
+ position number.
+
+ If a commit position metadata key is found, its number will be returned.
+
+ Otherwise, we will search for a 'git-svn' metadata entry. If one is found,
+ its SVN revision value is returned.
+ """
+ git_log = self.GitLog(format='%B', n=1, git_hash=git_hash, **kwargs)
+ footer_map = GetCommitMessageFooterMap(git_log)
+
+ # Search for commit position metadata
+ value = footer_map.get(COMMIT_POSITION_FOOTER_KEY)
+ if value:
+ match = COMMIT_POSITION_RE.match(value)
+ if match:
+ return match.group(2)
+
+ # Extract the svn revision from 'git-svn' metadata
+ value = footer_map.get(GIT_SVN_ID_FOOTER_KEY)
+ if value:
+ match = GIT_SVN_ID_RE.match(value)
+ if match:
+ return match.group(2)
+ return None
+
+ ### Git svn stuff
+
+ def GitSVNFetch(self, **kwargs):
+ self.Git("svn fetch", **kwargs)
+
+ def GitSVNRebase(self, **kwargs):
+ self.Git("svn rebase", **kwargs)
# TODO(machenbach): Unused? Remove.
@Strip
- def GitSVNLog(self):
- return self.Git("svn log -1 --oneline")
+ def GitSVNLog(self, **kwargs):
+ return self.Git("svn log -1 --oneline", **kwargs)
@Strip
- def GitSVNFindGitHash(self, revision, branch=""):
+ def GitSVNFindGitHash(self, revision, branch="", **kwargs):
assert revision
- return self.Git(MakeArgs(["svn find-rev", "r%s" % revision, branch]))
+ args = MakeArgs(["svn find-rev", "r%s" % revision, branch])
+
+ # Pick the last line if multiple lines are available. The first lines might
+ # print information about rebuilding the svn-git mapping.
+ return self.Git(args, **kwargs).splitlines()[-1]
@Strip
- def GitSVNFindSVNRev(self, git_hash, branch=""):
- return self.Git(MakeArgs(["svn find-rev", git_hash, branch]))
+ def GitSVNFindSVNRev(self, git_hash, branch="", **kwargs):
+ return self.Git(MakeArgs(["svn find-rev", git_hash, branch]), **kwargs)
- def GitSVNDCommit(self):
- return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None)
+ def GitSVNDCommit(self, **kwargs):
+ return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None, **kwargs)
- def GitSVNTag(self, version):
+ def GitSVNTag(self, version, **kwargs):
self.Git(("svn tag %s -m \"Tagging version %s\"" % (version, version)),
- retry_on=lambda x: x is None)
+ retry_on=lambda x: x is None,
+ **kwargs)
diff --git a/deps/v8/tools/push-to-trunk/merge_to_branch.py b/deps/v8/tools/push-to-trunk/merge_to_branch.py
index bd9531fb93..006afbb443 100755
--- a/deps/v8/tools/push-to-trunk/merge_to_branch.py
+++ b/deps/v8/tools/push-to-trunk/merge_to_branch.py
@@ -32,36 +32,20 @@ import sys
from common_includes import *
-ALREADY_MERGING_SENTINEL_FILE = "ALREADY_MERGING_SENTINEL_FILE"
-COMMIT_HASHES_FILE = "COMMIT_HASHES_FILE"
-TEMPORARY_PATCH_FILE = "TEMPORARY_PATCH_FILE"
-
-CONFIG = {
- BRANCHNAME: "prepare-merge",
- PERSISTFILE_BASENAME: "/tmp/v8-merge-to-branch-tempfile",
- ALREADY_MERGING_SENTINEL_FILE:
- "/tmp/v8-merge-to-branch-tempfile-already-merging",
- DOT_GIT_LOCATION: ".git",
- VERSION_FILE: "src/version.cc",
- TEMPORARY_PATCH_FILE: "/tmp/v8-prepare-merge-tempfile-temporary-patch",
- COMMITMSG_FILE: "/tmp/v8-prepare-merge-tempfile-commitmsg",
- COMMIT_HASHES_FILE: "/tmp/v8-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
-}
-
-
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- if os.path.exists(self.Config(ALREADY_MERGING_SENTINEL_FILE)):
+ if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
if self._options.force:
- os.remove(self.Config(ALREADY_MERGING_SENTINEL_FILE))
+ os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
elif self._options.step == 0: # pragma: no cover
self.Die("A merge is already in progress")
- open(self.Config(ALREADY_MERGING_SENTINEL_FILE), "a").close()
+ open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
- self.InitialEnvironmentChecks()
+ self.InitialEnvironmentChecks(self.default_cwd)
if self._options.revert_bleeding_edge:
+ # FIXME(machenbach): Make revert bleeding_edge obsolete?
self["merge_to_branch"] = "bleeding_edge"
elif self._options.branch:
self["merge_to_branch"] = self._options.branch
@@ -76,8 +60,8 @@ class CreateBranch(Step):
MESSAGE = "Create a fresh branch for the patch."
def RunStep(self):
- self.GitCreateBranch(self.Config(BRANCHNAME),
- "svn/%s" % self["merge_to_branch"])
+ self.GitCreateBranch(self.Config("BRANCHNAME"),
+ self.vc.RemoteBranch(self["merge_to_branch"]))
class SearchArchitecturePorts(Step):
@@ -91,9 +75,9 @@ class SearchArchitecturePorts(Step):
# Search for commits which matches the "Port rXXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="Port r%d" % int(revision),
- branch="svn/bleeding_edge")
+ branch=self.vc.RemoteMasterBranch())
for git_hash in git_hashes.splitlines():
- svn_revision = self.GitSVNFindSVNRev(git_hash, "svn/bleeding_edge")
+ svn_revision = self.vc.GitSvn(git_hash, self.vc.RemoteMasterBranch())
if not svn_revision: # pragma: no cover
self.Die("Cannot determine svn revision for %s" % git_hash)
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
@@ -121,7 +105,7 @@ class FindGitRevisions(Step):
def RunStep(self):
self["patch_commit_hashes"] = []
for revision in self["full_revision_list"]:
- next_hash = self.GitSVNFindGitHash(revision, "svn/bleeding_edge")
+ next_hash = self.vc.SvnGit(revision, self.vc.RemoteMasterBranch())
if not next_hash: # pragma: no cover
self.Die("Cannot determine git hash for r%s" % revision)
self["patch_commit_hashes"].append(next_hash)
@@ -159,8 +143,8 @@ class ApplyPatches(Step):
print("Applying patch for %s to %s..."
% (commit_hash, self["merge_to_branch"]))
patch = self.GitGetPatch(commit_hash)
- TextToFile(patch, self.Config(TEMPORARY_PATCH_FILE))
- self.ApplyPatch(self.Config(TEMPORARY_PATCH_FILE), self._options.revert)
+ TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
+ self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"), self._options.revert)
if self._options.patch:
self.ApplyPatch(self._options.patch, self._options.revert)
@@ -185,14 +169,14 @@ class IncrementVersion(Step):
if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
"changes. When you're done, save the file and exit your "
- "EDITOR.)" % self.Config(VERSION_FILE)):
- text = FileToText(self.Config(VERSION_FILE))
+ "EDITOR.)" % VERSION_FILE):
+ text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
r"\g<space>%s" % new_patch,
text)
- TextToFile(text, self.Config(VERSION_FILE))
+ TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
else:
- self.Editor(self.Config(VERSION_FILE))
+ self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
self.ReadAndPersistVersion("new_")
self["version"] = "%s.%s.%s.%s" % (self["new_major"],
self["new_minor"],
@@ -215,33 +199,18 @@ class CommitLocal(Step):
title = ("Version %s (merged %s)"
% (self["version"], self["revision_list"]))
self["new_commit_msg"] = "%s\n\n%s" % (title, self["new_commit_msg"])
- TextToFile(self["new_commit_msg"], self.Config(COMMITMSG_FILE))
- self.GitCommit(file_name=self.Config(COMMITMSG_FILE))
+ TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
+ self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
class CommitRepository(Step):
MESSAGE = "Commit to the repository."
def RunStep(self):
- self.GitCheckout(self.Config(BRANCHNAME))
+ self.GitCheckout(self.Config("BRANCHNAME"))
self.WaitForLGTM()
self.GitPresubmit()
- self.GitDCommit()
-
-
-class PrepareSVN(Step):
- MESSAGE = "Determine svn commit revision."
-
- def RunStep(self):
- if self._options.revert_bleeding_edge:
- return
- self.GitSVNFetch()
- commit_hash = self.GitLog(n=1, format="%H", grep=self["new_commit_msg"],
- branch="svn/%s" % self["merge_to_branch"])
- if not commit_hash: # pragma: no cover
- self.Die("Unable to map git commit to svn revision.")
- self["svn_revision"] = self.GitSVNFindSVNRev(commit_hash)
- print "subversion revision number is r%s" % self["svn_revision"]
+ self.vc.CLLand()
class TagRevision(Step):
@@ -251,15 +220,7 @@ class TagRevision(Step):
if self._options.revert_bleeding_edge:
return
print "Creating tag svn/tags/%s" % self["version"]
- if self["merge_to_branch"] == "trunk":
- self["to_url"] = "trunk"
- else:
- self["to_url"] = "branches/%s" % self["merge_to_branch"]
- self.SVN("copy -r %s https://v8.googlecode.com/svn/%s "
- "https://v8.googlecode.com/svn/tags/%s -m "
- "\"Tagging version %s\""
- % (self["svn_revision"], self["to_url"],
- self["version"], self["version"]))
+ self.vc.Tag(self["version"])
class CleanUp(Step):
@@ -270,8 +231,7 @@ class CleanUp(Step):
if not self._options.revert_bleeding_edge:
print "*** SUMMARY ***"
print "version: %s" % self["version"]
- print "branch: %s" % self["to_url"]
- print "svn revision: %s" % self["svn_revision"]
+ print "branch: %s" % self["merge_to_branch"]
if self["revision_list"]:
print "patches: %s" % self["revision_list"]
@@ -309,8 +269,21 @@ class MergeToBranch(ScriptsBase):
if not options.message:
print "You must specify a merge comment if no patches are specified"
return False
+ options.bypass_upload_hooks = True
+ # CC ulan to make sure that fixes are merged to Google3.
+ options.cc = "ulan@chromium.org"
return True
+ def _Config(self):
+ return {
+ "BRANCHNAME": "prepare-merge",
+ "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+ "ALREADY_MERGING_SENTINEL_FILE":
+ "/tmp/v8-merge-to-branch-tempfile-already-merging",
+ "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
+ "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ }
+
def _Steps(self):
return [
Preparation,
@@ -323,11 +296,10 @@ class MergeToBranch(ScriptsBase):
CommitLocal,
UploadStep,
CommitRepository,
- PrepareSVN,
TagRevision,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
- sys.exit(MergeToBranch(CONFIG).Run())
+ sys.exit(MergeToBranch().Run())
diff --git a/deps/v8/tools/push-to-trunk/push_to_trunk.py b/deps/v8/tools/push-to-trunk/push_to_trunk.py
index 56375fe79b..184617d159 100755
--- a/deps/v8/tools/push-to-trunk/push_to_trunk.py
+++ b/deps/v8/tools/push-to-trunk/push_to_trunk.py
@@ -27,50 +27,37 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
+import os
import sys
import tempfile
import urllib2
from common_includes import *
-TRUNKBRANCH = "TRUNKBRANCH"
-
-CONFIG = {
- BRANCHNAME: "prepare-push",
- TRUNKBRANCH: "trunk-push",
- PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile",
- DOT_GIT_LOCATION: ".git",
- VERSION_FILE: "src/version.cc",
- CHANGELOG_FILE: "ChangeLog",
- CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
- PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
- COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
-}
-
PUSH_MESSAGE_SUFFIX = " (based on bleeding_edge revision r%d)"
PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
-
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- self.InitialEnvironmentChecks()
+ self.InitialEnvironmentChecks(self.default_cwd)
self.CommonPrepare()
- if(self["current_branch"] == self.Config(TRUNKBRANCH)
- or self["current_branch"] == self.Config(BRANCHNAME)):
+ if(self["current_branch"] == self.Config("TRUNKBRANCH")
+ or self["current_branch"] == self.Config("BRANCHNAME")):
print "Warning: Script started on branch %s" % self["current_branch"]
self.PrepareBranch()
- self.DeleteBranch(self.Config(TRUNKBRANCH))
+ self.DeleteBranch(self.Config("TRUNKBRANCH"))
class FreshBranch(Step):
MESSAGE = "Create a fresh branch."
def RunStep(self):
- self.GitCreateBranch(self.Config(BRANCHNAME), "svn/bleeding_edge")
+ self.GitCreateBranch(self.Config("BRANCHNAME"),
+ self.vc.RemoteMasterBranch())
class PreparePushRevision(Step):
@@ -78,7 +65,7 @@ class PreparePushRevision(Step):
def RunStep(self):
if self._options.revision:
- self["push_hash"] = self.GitSVNFindGitHash(self._options.revision)
+ self["push_hash"] = self.vc.SvnGit(self._options.revision)
else:
self["push_hash"] = self.GitLog(n=1, format="%H", git_hash="HEAD")
if not self["push_hash"]: # pragma: no cover
@@ -109,7 +96,7 @@ class DetectLastPush(Step):
if not last_push_be_svn: # pragma: no cover
self.Die("Could not retrieve bleeding edge revision for trunk push %s"
% last_push)
- last_push_bleeding_edge = self.GitSVNFindGitHash(last_push_be_svn)
+ last_push_bleeding_edge = self.vc.SvnGit(last_push_be_svn)
if not last_push_bleeding_edge: # pragma: no cover
self.Die("Could not retrieve bleeding edge git hash for trunk push %s"
% last_push)
@@ -130,7 +117,7 @@ class GetCurrentBleedingEdgeVersion(Step):
MESSAGE = "Get latest bleeding edge version."
def RunStep(self):
- self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/bleeding_edge")
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
# Store latest version.
self.ReadAndPersistVersion("latest_")
@@ -143,7 +130,7 @@ class IncrementVersion(Step):
def RunStep(self):
# Retrieve current version from last trunk push.
- self.GitCheckoutFile(self.Config(VERSION_FILE), self["last_push_trunk"])
+ self.GitCheckoutFile(VERSION_FILE, self["last_push_trunk"])
self.ReadAndPersistVersion()
self["trunk_version"] = self.ArrayToVersion("")
@@ -154,21 +141,21 @@ class IncrementVersion(Step):
if SortingKey(self["trunk_version"]) < SortingKey(self["latest_version"]):
# If the version on bleeding_edge is newer than on trunk, use it.
- self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/bleeding_edge")
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
self.ReadAndPersistVersion()
if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
"changes. When you're done, save the file and exit your "
- "EDITOR.)" % self.Config(VERSION_FILE))):
+ "EDITOR.)" % VERSION_FILE)):
- text = FileToText(self.Config(VERSION_FILE))
+ text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
r"\g<space>%s" % str(int(self["build"]) + 1),
text)
- TextToFile(text, self.Config(VERSION_FILE))
+ TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
else:
- self.Editor(self.Config(VERSION_FILE))
+ self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
# Variables prefixed with 'new_' contain the new version numbers for the
# ongoing trunk push.
@@ -206,7 +193,7 @@ class PrepareChangeLog(Step):
def RunStep(self):
self["date"] = self.GetDate()
output = "%s: Version %s\n\n" % (self["date"], self["version"])
- TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
+ TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
commits = self.GitLog(format="%H",
git_hash="%s..%s" % (self["last_push_bleeding_edge"],
self["push_hash"]))
@@ -222,17 +209,17 @@ class PrepareChangeLog(Step):
# Auto-format commit messages.
body = MakeChangeLogBody(commit_messages, auto_format=True)
- AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
+ AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE"))
msg = (" Performance and stability improvements on all platforms."
"\n#\n# The change log above is auto-generated. Please review if "
"all relevant\n# commit messages from the list below are included."
"\n# All lines starting with # will be stripped.\n#\n")
- AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
+ AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE"))
# Include unformatted commit messages as a reference in a comment.
comment_body = MakeComment(MakeChangeLogBody(commit_messages))
- AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
+ AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE"))
class EditChangeLog(Step):
@@ -243,10 +230,10 @@ class EditChangeLog(Step):
"entry, then edit its contents to your liking. When you're done, "
"save the file and exit your EDITOR. ")
self.ReadLine(default="")
- self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
+ self.Editor(self.Config("CHANGELOG_ENTRY_FILE"))
# Strip comments and reformat with correct indentation.
- changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
+ changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip()
changelog_entry = StripComments(changelog_entry)
changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
changelog_entry = changelog_entry.lstrip()
@@ -255,7 +242,7 @@ class EditChangeLog(Step):
self.Die("Empty ChangeLog entry.")
# Safe new change log for adding it later to the trunk patch.
- TextToFile(changelog_entry, self.Config(CHANGELOG_ENTRY_FILE))
+ TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
class StragglerCommits(Step):
@@ -263,8 +250,8 @@ class StragglerCommits(Step):
"started.")
def RunStep(self):
- self.GitSVNFetch()
- self.GitCheckout("svn/bleeding_edge")
+ self.vc.Fetch()
+ self.GitCheckout(self.vc.RemoteMasterBranch())
class SquashCommits(Step):
@@ -273,18 +260,19 @@ class SquashCommits(Step):
def RunStep(self):
# Instead of relying on "git rebase -i", we'll just create a diff, because
# that's easier to automate.
- TextToFile(self.GitDiff("svn/trunk", self["push_hash"]),
- self.Config(PATCH_FILE))
+ TextToFile(self.GitDiff(self.vc.RemoteCandidateBranch(),
+ self["push_hash"]),
+ self.Config("PATCH_FILE"))
# Convert the ChangeLog entry to commit message format.
- text = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
+ text = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
# Remove date and trailing white space.
text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
# Retrieve svn revision for showing the used bleeding edge revision in the
# commit message.
- self["svn_revision"] = self.GitSVNFindSVNRev(self["push_hash"])
+ self["svn_revision"] = self.vc.GitSvn(self["push_hash"])
suffix = PUSH_MESSAGE_SUFFIX % int(self["svn_revision"])
text = MSub(r"^(Version \d+\.\d+\.\d+)$", "\\1%s" % suffix, text)
@@ -297,22 +285,23 @@ class SquashCommits(Step):
if not text: # pragma: no cover
self.Die("Commit message editing failed.")
- TextToFile(text, self.Config(COMMITMSG_FILE))
+ TextToFile(text, self.Config("COMMITMSG_FILE"))
class NewBranch(Step):
MESSAGE = "Create a new branch from trunk."
def RunStep(self):
- self.GitCreateBranch(self.Config(TRUNKBRANCH), "svn/trunk")
+ self.GitCreateBranch(self.Config("TRUNKBRANCH"),
+ self.vc.RemoteCandidateBranch())
class ApplyChanges(Step):
MESSAGE = "Apply squashed changes."
def RunStep(self):
- self.ApplyPatch(self.Config(PATCH_FILE))
- Command("rm", "-f %s*" % self.Config(PATCH_FILE))
+ self.ApplyPatch(self.Config("PATCH_FILE"))
+ os.remove(self.Config("PATCH_FILE"))
class AddChangeLog(Step):
@@ -322,12 +311,13 @@ class AddChangeLog(Step):
# The change log has been modified by the patch. Reset it to the version
# on trunk and apply the exact changes determined by this PrepareChangeLog
# step above.
- self.GitCheckoutFile(self.Config(CHANGELOG_FILE), "svn/trunk")
- changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
- old_change_log = FileToText(self.Config(CHANGELOG_FILE))
+ self.GitCheckoutFile(self.Config("CHANGELOG_FILE"),
+ self.vc.RemoteCandidateBranch())
+ changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+ old_change_log = FileToText(self.Config("CHANGELOG_FILE"))
new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
- TextToFile(new_change_log, self.Config(CHANGELOG_FILE))
- os.remove(self.Config(CHANGELOG_ENTRY_FILE))
+ TextToFile(new_change_log, self.Config("CHANGELOG_FILE"))
+ os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
class SetVersion(Step):
@@ -336,16 +326,16 @@ class SetVersion(Step):
def RunStep(self):
# The version file has been modified by the patch. Reset it to the version
# on trunk and apply the correct version.
- self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/trunk")
- self.SetVersion(self.Config(VERSION_FILE), "new_")
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteCandidateBranch())
+ self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
class CommitTrunk(Step):
MESSAGE = "Commit to local trunk branch."
def RunStep(self):
- self.GitCommit(file_name = self.Config(COMMITMSG_FILE))
- Command("rm", "-f %s*" % self.Config(COMMITMSG_FILE))
+ self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
+ os.remove(self.Config("COMMITMSG_FILE"))
class SanityCheck(Step):
@@ -356,7 +346,7 @@ class SanityCheck(Step):
# prepare push process.
if not self.Confirm("Please check if your local checkout is sane: Inspect "
"%s, compile, run tests. Do you want to commit this new trunk "
- "revision to the repository?" % self.Config(VERSION_FILE)):
+ "revision to the repository?" % VERSION_FILE):
self.Die("Execution canceled.") # pragma: no cover
@@ -364,31 +354,14 @@ class CommitSVN(Step):
MESSAGE = "Commit to SVN."
def RunStep(self):
- result = self.GitSVNDCommit()
- if not result: # pragma: no cover
- self.Die("'git svn dcommit' failed.")
- result = filter(lambda x: re.search(r"^Committed r[0-9]+", x),
- result.splitlines())
- if len(result) > 0:
- self["trunk_revision"] = re.sub(r"^Committed r([0-9]+)", r"\1",result[0])
-
- # Sometimes grepping for the revision fails. No idea why. If you figure
- # out why it is flaky, please do fix it properly.
- if not self["trunk_revision"]:
- print("Sorry, grepping for the SVN revision failed. Please look for it "
- "in the last command's output above and provide it manually (just "
- "the number, without the leading \"r\").")
- self.DieNoManualMode("Can't prompt in forced mode.")
- while not self["trunk_revision"]:
- print "> ",
- self["trunk_revision"] = self.ReadLine()
+ result = self.vc.Land()
class TagRevision(Step):
MESSAGE = "Tag the new revision."
def RunStep(self):
- self.GitSVNTag(self["version"])
+ self.vc.Tag(self["version"])
class CleanUp(Step):
@@ -396,14 +369,12 @@ class CleanUp(Step):
def RunStep(self):
print("Congratulations, you have successfully created the trunk "
- "revision %s. Please don't forget to roll this new version into "
- "Chromium, and to update the v8rel spreadsheet:"
+ "revision %s."
% self["version"])
- print "%s\ttrunk\t%s" % (self["version"], self["trunk_revision"])
self.CommonCleanup()
- if self.Config(TRUNKBRANCH) != self["current_branch"]:
- self.GitDeleteBranch(self.Config(TRUNKBRANCH))
+ if self.Config("TRUNKBRANCH") != self["current_branch"]:
+ self.GitDeleteBranch(self.Config("TRUNKBRANCH"))
class PushToTrunk(ScriptsBase):
@@ -439,6 +410,17 @@ class PushToTrunk(ScriptsBase):
options.tbr_commit = not options.manual
return True
+ def _Config(self):
+ return {
+ "BRANCHNAME": "prepare-push",
+ "TRUNKBRANCH": "trunk-push",
+ "PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
+ "CHANGELOG_FILE": "ChangeLog",
+ "CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
+ "PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
+ "COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+ }
+
def _Steps(self):
return [
Preparation,
@@ -464,4 +446,4 @@ class PushToTrunk(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(PushToTrunk(CONFIG).Run())
+ sys.exit(PushToTrunk().Run())
diff --git a/deps/v8/tools/push-to-trunk/releases.py b/deps/v8/tools/push-to-trunk/releases.py
index ff57844968..646e8c03cf 100755
--- a/deps/v8/tools/push-to-trunk/releases.py
+++ b/deps/v8/tools/push-to-trunk/releases.py
@@ -20,15 +20,9 @@ import sys
from common_includes import *
-DEPS_FILE = "DEPS_FILE"
-CHROMIUM = "CHROMIUM"
-
CONFIG = {
- BRANCHNAME: "retrieve-v8-releases",
- PERSISTFILE_BASENAME: "/tmp/v8-releases-tempfile",
- DOT_GIT_LOCATION: ".git",
- VERSION_FILE: "src/version.cc",
- DEPS_FILE: "DEPS",
+ "BRANCHNAME": "retrieve-v8-releases",
+ "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
}
# Expression for retrieving the bleeding edge revision from a commit message.
@@ -47,10 +41,10 @@ REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M)
# Expression with three versions (historical) for extracting the v8 revision
# from the chromium DEPS file.
-DEPS_RE = re.compile(r'^\s*(?:"v8_revision": "'
- '|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@'
- '|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)'
- '([0-9]+)".*$', re.M)
+DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
+ """|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@"""
+ """|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)"""
+ """([^"']+)["'].*$""", re.M)
# Expression to pick tag and revision for bleeding edge tags. To be used with
# output of 'svn log'.
@@ -147,7 +141,7 @@ class RetrieveV8Releases(Step):
def GetReleaseDict(
self, git_hash, bleeding_edge_rev, branch, version, patches, cl_body):
- revision = self.GitSVNFindSVNRev(git_hash)
+ revision = self.vc.GitSvn(git_hash)
return {
# The SVN revision on the branch.
"revision": revision,
@@ -193,7 +187,7 @@ class RetrieveV8Releases(Step):
tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
releases = []
for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
- git_hash = self.GitSVNFindGitHash(revision)
+ git_hash = self.vc.SvnGit(revision)
# Add bleeding edge release. It does not contain patches or a code
# review link, as tags are not uploaded.
@@ -202,18 +196,19 @@ class RetrieveV8Releases(Step):
return releases
def GetReleasesFromBranch(self, branch):
- self.GitReset("svn/%s" % branch)
+ self.GitReset(self.vc.RemoteBranch(branch))
+ # TODO(machenbach): Rename this when switching to the git mirror.
if branch == 'bleeding_edge':
return self.GetReleasesFromBleedingEdge()
releases = []
try:
for git_hash in self.GitLog(format="%H").splitlines():
- if self._config[VERSION_FILE] not in self.GitChangedFiles(git_hash):
+ if VERSION_FILE not in self.GitChangedFiles(git_hash):
continue
if self.ExceedsMax(releases):
break # pragma: no cover
- if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash):
+ if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
break # pragma: no cover
release, patch_level = self.GetRelease(git_hash, branch)
@@ -231,17 +226,12 @@ class RetrieveV8Releases(Step):
pass
# Clean up checked-out version file.
- self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD")
+ self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
return releases
def RunStep(self):
- self.GitCreateBranch(self._config[BRANCHNAME])
- # Get relevant remote branches, e.g. "svn/3.25".
- branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s),
- self.GitRemotes())
- # Remove 'svn/' prefix.
- branches = map(lambda s: s[4:], branches)
-
+ self.GitCreateBranch(self._config["BRANCHNAME"])
+ branches = self.vc.GetBranches()
releases = []
if self._options.branch == 'recent':
# Get only recent development on trunk, beta and stable.
@@ -268,68 +258,68 @@ class RetrieveV8Releases(Step):
reverse=True)
-# TODO(machenbach): Parts of the Chromium setup are c/p from the chromium_roll
-# script -> unify.
-class CheckChromium(Step):
- MESSAGE = "Check the chromium checkout."
-
- def Run(self):
- self["chrome_path"] = self._options.chromium
-
-
class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
- REQUIRES = "chrome_path"
def RunStep(self):
- self["v8_path"] = os.getcwd()
- os.chdir(self["chrome_path"])
+ cwd = self._options.chromium
# Check for a clean workdir.
- if not self.GitIsWorkdirClean(): # pragma: no cover
+ if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Assert that the DEPS file is there.
- if not os.path.exists(self.Config(DEPS_FILE)): # pragma: no cover
+ if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
MESSAGE = "Update the checkout and create a new branch."
- REQUIRES = "chrome_path"
def RunStep(self):
- os.chdir(self["chrome_path"])
- self.GitCheckout("master")
- self.GitPull()
- self.GitCreateBranch(self.Config(BRANCHNAME))
+ cwd = self._options.chromium
+ self.GitCheckout("master", cwd=cwd)
+ self.GitPull(cwd=cwd)
+ self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
+
+
+def ConvertToCommitNumber(step, revision):
+ # Simple check for git hashes.
+ if revision.isdigit() and len(revision) < 8:
+ return revision
+ return step.GitConvertToSVNRevision(
+ revision, cwd=os.path.join(step._options.chromium, "v8"))
class RetrieveChromiumV8Releases(Step):
MESSAGE = "Retrieve V8 releases from Chromium DEPS."
- REQUIRES = "chrome_path"
def RunStep(self):
- os.chdir(self["chrome_path"])
-
- trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"])
- if not trunk_releases: # pragma: no cover
- print "No trunk releases detected. Skipping chromium history."
+ cwd = self._options.chromium
+ releases = filter(
+ lambda r: r["branch"] in ["trunk", "bleeding_edge"], self["releases"])
+ if not releases: # pragma: no cover
+ print "No releases detected. Skipping chromium history."
return True
- oldest_v8_rev = int(trunk_releases[-1]["revision"])
+ # Update v8 checkout in chromium.
+ self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
+
+ oldest_v8_rev = int(releases[-1]["revision"])
cr_releases = []
try:
- for git_hash in self.GitLog(format="%H", grep="V8").splitlines():
- if self._config[DEPS_FILE] not in self.GitChangedFiles(git_hash):
+ for git_hash in self.GitLog(
+ format="%H", grep="V8", cwd=cwd).splitlines():
+ if "DEPS" not in self.GitChangedFiles(git_hash, cwd=cwd):
continue
- if not self.GitCheckoutFileSafe(self._config[DEPS_FILE], git_hash):
+ if not self.GitCheckoutFileSafe("DEPS", git_hash, cwd=cwd):
break # pragma: no cover
- deps = FileToText(self.Config(DEPS_FILE))
+ deps = FileToText(os.path.join(cwd, "DEPS"))
match = DEPS_RE.search(deps)
if match:
- svn_rev = self.GitSVNFindSVNRev(git_hash)
- v8_rev = match.group(1)
- cr_releases.append([svn_rev, v8_rev])
+ cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
+ if cr_rev:
+ v8_rev = ConvertToCommitNumber(self, match.group(1))
+ cr_releases.append([cr_rev, v8_rev])
# Stop after reaching beyond the last v8 revision we want to update.
# We need a small buffer for possible revert/reland frenzies.
@@ -342,23 +332,21 @@ class RetrieveChromiumV8Releases(Step):
pass
# Clean up.
- self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD")
+ self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
- # Add the chromium ranges to the v8 trunk releases.
+ # Add the chromium ranges to the v8 trunk and bleeding_edge releases.
all_ranges = BuildRevisionRanges(cr_releases)
- trunk_dict = dict((r["revision"], r) for r in trunk_releases)
+ releases_dict = dict((r["revision"], r) for r in releases)
for revision, ranges in all_ranges.iteritems():
- trunk_dict.get(revision, {})["chromium_revision"] = ranges
+ releases_dict.get(revision, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
class RietrieveChromiumBranches(Step):
MESSAGE = "Retrieve Chromium branch information."
- REQUIRES = "chrome_path"
def RunStep(self):
- os.chdir(self["chrome_path"])
-
+ cwd = self._options.chromium
trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"])
if not trunk_releases: # pragma: no cover
print "No trunk releases detected. Skipping chromium history."
@@ -368,7 +356,7 @@ class RietrieveChromiumBranches(Step):
# Filter out irrelevant branches.
branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
- self.GitRemotes())
+ self.GitRemotes(cwd=cwd))
# Transform into pure branch numbers.
branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)),
@@ -379,13 +367,14 @@ class RietrieveChromiumBranches(Step):
cr_branches = []
try:
for branch in branches:
- if not self.GitCheckoutFileSafe(self._config[DEPS_FILE],
- "branch-heads/%d" % branch):
+ if not self.GitCheckoutFileSafe("DEPS",
+ "branch-heads/%d" % branch,
+ cwd=cwd):
break # pragma: no cover
- deps = FileToText(self.Config(DEPS_FILE))
+ deps = FileToText(os.path.join(cwd, "DEPS"))
match = DEPS_RE.search(deps)
if match:
- v8_rev = match.group(1)
+ v8_rev = ConvertToCommitNumber(self, match.group(1))
cr_branches.append([str(branch), v8_rev])
# Stop after reaching beyond the last v8 revision we want to update.
@@ -399,7 +388,7 @@ class RietrieveChromiumBranches(Step):
pass
# Clean up.
- self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD")
+ self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
# Add the chromium branches to the v8 trunk releases.
all_ranges = BuildRevisionRanges(cr_branches)
@@ -408,20 +397,12 @@ class RietrieveChromiumBranches(Step):
trunk_dict.get(revision, {})["chromium_branch"] = ranges
-class SwitchV8(Step):
- MESSAGE = "Returning to V8 checkout."
- REQUIRES = "chrome_path"
-
- def RunStep(self):
- self.GitCheckout("master")
- self.GitDeleteBranch(self.Config(BRANCHNAME))
- os.chdir(self["v8_path"])
-
-
class CleanUp(Step):
MESSAGE = "Clean up."
def RunStep(self):
+ self.GitCheckout("master", cwd=self._options.chromium)
+ self.GitDeleteBranch(self.Config("BRANCHNAME"), cwd=self._options.chromium)
self.CommonCleanup()
@@ -462,20 +443,24 @@ class Releases(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
return True
+ def _Config(self):
+ return {
+ "BRANCHNAME": "retrieve-v8-releases",
+ "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
+ }
+
def _Steps(self):
return [
Preparation,
RetrieveV8Releases,
- CheckChromium,
SwitchChromium,
UpdateChromiumCheckout,
RetrieveChromiumV8Releases,
RietrieveChromiumBranches,
- SwitchV8,
CleanUp,
WriteOutput,
]
if __name__ == "__main__": # pragma: no cover
- sys.exit(Releases(CONFIG).Run())
+ sys.exit(Releases().Run())
diff --git a/deps/v8/tools/push-to-trunk/test_scripts.py b/deps/v8/tools/push-to-trunk/test_scripts.py
index 82a4d15f2e..4edb3481b8 100644
--- a/deps/v8/tools/push-to-trunk/test_scripts.py
+++ b/deps/v8/tools/push-to-trunk/test_scripts.py
@@ -27,15 +27,14 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
+import shutil
import tempfile
import traceback
import unittest
import auto_push
from auto_push import CheckLastPush
-from auto_push import SETTINGS_LOCATION
import auto_roll
-from auto_roll import CLUSTERFUZZ_API_KEY_FILE
import common_includes
from common_includes import *
import merge_to_branch
@@ -43,8 +42,6 @@ from merge_to_branch import *
import push_to_trunk
from push_to_trunk import *
import chromium_roll
-from chromium_roll import CHROMIUM
-from chromium_roll import DEPS_FILE
from chromium_roll import ChromiumRoll
import releases
from releases import Releases
@@ -56,23 +53,20 @@ from auto_tag import AutoTag
TEST_CONFIG = {
- BRANCHNAME: "test-prepare-push",
- TRUNKBRANCH: "test-trunk-push",
- PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile",
- DOT_GIT_LOCATION: None,
- VERSION_FILE: None,
- CHANGELOG_FILE: None,
- CHANGELOG_ENTRY_FILE: "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
- PATCH_FILE: "/tmp/test-v8-push-to-trunk-tempfile-patch",
- COMMITMSG_FILE: "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
- CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
- DEPS_FILE: "/tmp/test-v8-push-to-trunk-tempfile-chromium/DEPS",
- SETTINGS_LOCATION: None,
- ALREADY_MERGING_SENTINEL_FILE:
+ "DEFAULT_CWD": None,
+ "BRANCHNAME": "test-prepare-push",
+ "TRUNKBRANCH": "test-trunk-push",
+ "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
+ "CHANGELOG_FILE": None,
+ "CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
+ "PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
+ "COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
+ "CHROMIUM": "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+ "SETTINGS_LOCATION": None,
+ "ALREADY_MERGING_SENTINEL_FILE":
"/tmp/test-merge-to-branch-tempfile-already-merging",
- COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
- TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch",
- CLUSTERFUZZ_API_KEY_FILE: "/tmp/test-fake-cf-api-key",
+ "TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
+ "CLUSTERFUZZ_API_KEY_FILE": "/tmp/test-fake-cf-api-key",
}
@@ -252,19 +246,26 @@ Committed: https://code.google.com/p/v8/source/detail?r=18210
"BUG=1234567890\n"))
-def Git(*args, **kwargs):
- """Convenience function returning a git test expectation."""
+def Cmd(*args, **kwargs):
+ """Convenience function returning a shell command test expectation."""
return {
- "name": "git",
- "args": args[:-1],
+ "name": "command",
+ "args": args,
"ret": args[-1],
"cb": kwargs.get("cb"),
+ "cwd": kwargs.get("cwd", TEST_CONFIG["DEFAULT_CWD"]),
}
def RL(text, cb=None):
"""Convenience function returning a readline test expectation."""
- return {"name": "readline", "args": [], "ret": text, "cb": cb}
+ return {
+ "name": "readline",
+ "args": [],
+ "ret": text,
+ "cb": cb,
+ "cwd": None,
+ }
def URL(*args, **kwargs):
@@ -274,19 +275,19 @@ def URL(*args, **kwargs):
"args": args[:-1],
"ret": args[-1],
"cb": kwargs.get("cb"),
+ "cwd": None,
}
class SimpleMock(object):
- def __init__(self, name):
- self._name = name
+ def __init__(self):
self._recipe = []
self._index = -1
def Expect(self, recipe):
self._recipe = recipe
- def Call(self, name, *args): # pragma: no cover
+ def Call(self, name, *args, **kwargs): # pragma: no cover
self._index += 1
try:
expected_call = self._recipe[self._index]
@@ -294,21 +295,33 @@ class SimpleMock(object):
raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
if not isinstance(expected_call, dict):
- raise NoRetryException("Found wrong expectation type for %s %s"
- % (name, " ".join(args)))
+ raise NoRetryException("Found wrong expectation type for %s %s" %
+ (name, " ".join(args)))
+ if expected_call["name"] != name:
+ raise NoRetryException("Expected action: %s %s - Actual: %s" %
+ (expected_call["name"], expected_call["args"], name))
+
+ # Check if the given working directory matches the expected one.
+ if expected_call["cwd"] != kwargs.get("cwd"):
+ raise NoRetryException("Expected cwd: %s in %s %s - Actual: %s" %
+ (expected_call["cwd"],
+ expected_call["name"],
+ expected_call["args"],
+ kwargs.get("cwd")))
# The number of arguments in the expectation must match the actual
# arguments.
if len(args) > len(expected_call['args']):
raise NoRetryException("When calling %s with arguments, the "
- "expectations must consist of at least as many arguments." % name)
+ "expectations must consist of at least as many arguments." %
+ name)
# Compare expected and actual arguments.
for (expected_arg, actual_arg) in zip(expected_call['args'], args):
if expected_arg != actual_arg:
- raise NoRetryException("Expected: %s - Actual: %s"
- % (expected_arg, actual_arg))
+ raise NoRetryException("Expected: %s - Actual: %s" %
+ (expected_arg, actual_arg))
# The expected call contains an optional callback for checking the context
# at the time of the call.
@@ -326,8 +339,8 @@ class SimpleMock(object):
def AssertFinished(self): # pragma: no cover
if self._index < len(self._recipe) -1:
- raise NoRetryException("Called %s too seldom: %d vs. %d"
- % (self._name, self._index, len(self._recipe)))
+ raise NoRetryException("Called mock too seldom: %d vs. %d" %
+ (self._index, len(self._recipe)))
class ScriptTest(unittest.TestCase):
@@ -337,8 +350,17 @@ class ScriptTest(unittest.TestCase):
self._tmp_files.append(name)
return name
+ def MakeEmptyTempDirectory(self):
+ name = tempfile.mkdtemp()
+ self._tmp_files.append(name)
+ return name
+
+
def WriteFakeVersionFile(self, minor=22, build=4, patch=0):
- with open(TEST_CONFIG[VERSION_FILE], "w") as f:
+ version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
+ if not os.path.exists(os.path.dirname(version_file)):
+ os.makedirs(os.path.dirname(version_file))
+ with open(version_file, "w") as f:
f.write(" // Some line...\n")
f.write("\n")
f.write("#define MAJOR_VERSION 3\n")
@@ -360,36 +382,22 @@ class ScriptTest(unittest.TestCase):
args = args if args is not None else ["-m"]
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
- def GitMock(self, cmd, args="", pipe=True):
- print "%s %s" % (cmd, args)
- return self._git_mock.Call("git", args)
-
- def LogMock(self, cmd, args=""):
- print "Log: %s %s" % (cmd, args)
-
- MOCKS = {
- "git": GitMock,
- # TODO(machenbach): Little hack to reuse the git mock for the one svn call
- # in merge-to-branch. The command should be made explicit in the test
- # expectations.
- "svn": GitMock,
- "vi": LogMock,
- }
-
def Call(self, fun, *args, **kwargs):
print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
- def Command(self, cmd, args="", prefix="", pipe=True):
- return ScriptTest.MOCKS[cmd](self, cmd, args)
+ def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
+ print "%s %s" % (cmd, args)
+ print "in %s" % cwd
+ return self._mock.Call("command", cmd + " " + args, cwd=cwd)
def ReadLine(self):
- return self._rl_mock.Call("readline")
+ return self._mock.Call("readline")
def ReadURL(self, url, params):
if params is not None:
- return self._url_mock.Call("readurl", url, params)
+ return self._mock.Call("readurl", url, params)
else:
- return self._url_mock.Call("readurl", url)
+ return self._mock.Call("readurl", url)
def ReadClusterFuzzAPI(self, api_key, **params):
# TODO(machenbach): Use a mock for this and add a test that stops rolling
@@ -405,90 +413,82 @@ class ScriptTest(unittest.TestCase):
def GetUTCStamp(self):
return "100000"
- def ExpectGit(self, *args):
- """Convenience wrapper."""
- self._git_mock.Expect(*args)
-
- def ExpectReadline(self, *args):
- """Convenience wrapper."""
- self._rl_mock.Expect(*args)
-
- def ExpectReadURL(self, *args):
+ def Expect(self, *args):
"""Convenience wrapper."""
- self._url_mock.Expect(*args)
+ self._mock.Expect(*args)
def setUp(self):
- self._git_mock = SimpleMock("git")
- self._rl_mock = SimpleMock("readline")
- self._url_mock = SimpleMock("readurl")
+ self._mock = SimpleMock()
self._tmp_files = []
self._state = {}
+ TEST_CONFIG["DEFAULT_CWD"] = self.MakeEmptyTempDirectory()
def tearDown(self):
- Command("rm", "-rf %s*" % TEST_CONFIG[PERSISTFILE_BASENAME])
+ if os.path.exists(TEST_CONFIG["PERSISTFILE_BASENAME"]):
+ shutil.rmtree(TEST_CONFIG["PERSISTFILE_BASENAME"])
# Clean up temps. Doesn't work automatically.
for name in self._tmp_files:
- if os.path.exists(name):
+ if os.path.isfile(name):
os.remove(name)
+ if os.path.isdir(name):
+ shutil.rmtree(name)
- self._git_mock.AssertFinished()
- self._rl_mock.AssertFinished()
- self._url_mock.AssertFinished()
-
- def testGitOrig(self):
- self.assertTrue(Command("git", "--version").startswith("git version"))
+ self._mock.AssertFinished()
def testGitMock(self):
- self.ExpectGit([Git("--version", "git version 1.2.3"), Git("dummy", "")])
+ self.Expect([Cmd("git --version", "git version 1.2.3"),
+ Cmd("git dummy", "")])
self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
self.assertEquals("", self.MakeStep().Git("dummy"))
def testCommonPrepareDefault(self):
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+ RL("Y"),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
- self.ExpectReadline([RL("Y")])
self.MakeStep().CommonPrepare()
self.MakeStep().PrepareBranch()
self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareNoConfirm(self):
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+ RL("n"),
])
- self.ExpectReadline([RL("n")])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareDeleteBranchFailure(self):
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], None),
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+ RL("Y"),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
])
- self.ExpectReadline([RL("Y")])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
self.assertEquals("some_branch", self._state["current_branch"])
def testInitialEnvironmentChecks(self):
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
os.environ["EDITOR"] = "vi"
- self.MakeStep().InitialEnvironmentChecks()
+ self.Expect([
+ Cmd("which vi", "/usr/bin/vi"),
+ ])
+ self.MakeStep().InitialEnvironmentChecks(TEST_CONFIG["DEFAULT_CWD"])
def testReadAndPersistVersion(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile(build=5)
step = self.MakeStep()
step.ReadAndPersistVersion()
@@ -520,40 +520,35 @@ class ScriptTest(unittest.TestCase):
def testPreparePushRevision(self):
# Tests the default push hash used when the --revision option is not set.
- self.ExpectGit([
- Git("log -1 --format=%H HEAD", "push_hash")
+ self.Expect([
+ Cmd("git log -1 --format=%H HEAD", "push_hash")
])
self.RunStep(PushToTrunk, PreparePushRevision)
self.assertEquals("push_hash", self._state["push_hash"])
def testPrepareChangeLog(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
- TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-
- self.ExpectGit([
- Git("log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
- Git("log -1 --format=%s rev1", "Title text 1"),
- Git("log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
- Git("log -1 --format=%an rev1", "author1@chromium.org"),
- Git("log -1 --format=%s rev2", "Title text 2."),
- Git("log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
- Git("log -1 --format=%an rev2", "author2@chromium.org"),
- Git("log -1 --format=%s rev3", "Title text 3"),
- Git("log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
- Git("log -1 --format=%an rev3", "author3@chromium.org"),
- Git("log -1 --format=%s rev4", "Title text 4"),
- Git("log -1 --format=%B rev4",
+ TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+
+ self.Expect([
+ Cmd("git log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
+ Cmd("git log -1 --format=%s rev1", "Title text 1"),
+ Cmd("git log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
+ Cmd("git log -1 --format=%an rev1", "author1@chromium.org"),
+ Cmd("git log -1 --format=%s rev2", "Title text 2."),
+ Cmd("git log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
+ Cmd("git log -1 --format=%an rev2", "author2@chromium.org"),
+ Cmd("git log -1 --format=%s rev3", "Title text 3"),
+ Cmd("git log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
+ Cmd("git log -1 --format=%an rev3", "author3@chromium.org"),
+ Cmd("git log -1 --format=%s rev4", "Title text 4"),
+ Cmd("git log -1 --format=%B rev4",
("Title\n\nBUG=456\nLOG=Y\n\n"
"Review URL: https://codereview.chromium.org/9876543210\n")),
- Git("log -1 --format=%an rev4", "author4@chromium.org"),
- ])
-
- # The cl for rev4 on rietveld has an updated LOG flag.
- self.ExpectReadURL([
URL("https://codereview.chromium.org/9876543210/description",
"Title\n\nBUG=456\nLOG=N\n\n"),
+ Cmd("git log -1 --format=%an rev4", "author4@chromium.org"),
])
self._state["last_push_bleeding_edge"] = "1234"
@@ -561,7 +556,7 @@ class ScriptTest(unittest.TestCase):
self._state["version"] = "3.22.5"
self.RunStep(PushToTrunk, PrepareChangeLog)
- actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+ actual_cl = FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
expected_cl = """1999-07-31: Version 3.22.5
@@ -592,35 +587,31 @@ class ScriptTest(unittest.TestCase):
self.assertEquals(expected_cl, actual_cl)
def testEditChangeLog(self):
- TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
- TextToFile(" New \n\tLines \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+ TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+ TextToFile(" New \n\tLines \n", TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
os.environ["EDITOR"] = "vi"
-
- self.ExpectReadline([
+ self.Expect([
RL(""), # Open editor.
+ Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""),
])
self.RunStep(PushToTrunk, EditChangeLog)
self.assertEquals("New\n Lines",
- FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE]))
+ FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"]))
# Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6.
# Make sure that the increment is 3.22.7.0.
def testIncrementVersion(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
self._state["last_push_trunk"] = "hash1"
self._state["latest_build"] = "6"
self._state["latest_version"] = "3.22.6.0"
- self.ExpectGit([
- Git("checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], ""),
- Git("checkout -f svn/bleeding_edge -- %s" % TEST_CONFIG[VERSION_FILE],
+ self.Expect([
+ Cmd("git checkout -f hash1 -- src/version.cc", ""),
+ Cmd("git checkout -f svn/bleeding_edge -- src/version.cc",
"", cb=lambda: self.WriteFakeVersionFile(22, 6)),
- ])
-
- self.ExpectReadline([
RL("Y"), # Increment build number.
])
@@ -632,22 +623,22 @@ class ScriptTest(unittest.TestCase):
self.assertEquals("0", self._state["new_patch"])
def _TestSquashCommits(self, change_log, expected_msg):
- TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
- with open(TEST_CONFIG[CHANGELOG_ENTRY_FILE], "w") as f:
+ TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+ with open(TEST_CONFIG["CHANGELOG_ENTRY_FILE"], "w") as f:
f.write(change_log)
- self.ExpectGit([
- Git("diff svn/trunk hash1", "patch content"),
- Git("svn find-rev hash1", "123455\n"),
+ self.Expect([
+ Cmd("git diff svn/trunk hash1", "patch content"),
+ Cmd("git svn find-rev hash1", "123455\n"),
])
self._state["push_hash"] = "hash1"
self._state["date"] = "1999-11-11"
self.RunStep(PushToTrunk, SquashCommits)
- self.assertEquals(FileToText(TEST_CONFIG[COMMITMSG_FILE]), expected_msg)
+ self.assertEquals(FileToText(TEST_CONFIG["COMMITMSG_FILE"]), expected_msg)
- patch = FileToText(TEST_CONFIG[ PATCH_FILE])
+ patch = FileToText(TEST_CONFIG["PATCH_FILE"])
self.assertTrue(re.search(r"patch content", patch))
def testSquashCommitsUnformatted(self):
@@ -684,17 +675,16 @@ Performance and stability improvements on all platforms."""
self._TestSquashCommits(change_log, commit_msg)
def _PushToTrunk(self, force=False, manual=False):
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
# The version file on bleeding edge has build level 5, while the version
# file from trunk has build level 4.
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile(build=5)
- TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
- TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+ TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
bleeding_edge_change_log = "2014-03-17: Sentinel\n"
- TextToFile(bleeding_edge_change_log, TEST_CONFIG[CHANGELOG_FILE])
+ TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
os.environ["EDITOR"] = "vi"
def ResetChangeLog():
@@ -703,21 +693,22 @@ Performance and stability improvements on all platforms."""
trunk_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
- TextToFile(trunk_change_log, TEST_CONFIG[CHANGELOG_FILE])
+ TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
def ResetToTrunk():
ResetChangeLog()
self.WriteFakeVersionFile()
def CheckSVNCommit():
- commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(
"""Version 3.22.5 (based on bleeding_edge revision r123455)
Log text 1 (issue 321).
Performance and stability improvements on all platforms.""", commit)
- version = FileToText(TEST_CONFIG[VERSION_FILE])
+ version = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
@@ -725,7 +716,7 @@ Performance and stability improvements on all platforms.""", commit)
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the trunk branch got correctly modified.
- change_log = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
self.assertEquals(
"""1999-07-31: Version 3.22.5
@@ -740,61 +731,72 @@ Performance and stability improvements on all platforms.""", commit)
change_log)
force_flag = " -f" if not manual else ""
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* branch2\n"),
- Git("branch", " branch1\n* branch2\n"),
- Git("checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""),
- Git("svn find-rev r123455", "push_hash\n"),
- Git(("log -1 --format=%H --grep="
+ expectations = []
+ if not force:
+ expectations.append(Cmd("which vi", "/usr/bin/vi"))
+ expectations += [
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git checkout -b %s svn/bleeding_edge" % TEST_CONFIG["BRANCHNAME"],
+ ""),
+ Cmd("git svn find-rev r123455", "push_hash\n"),
+ Cmd(("git log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
"svn/trunk"), "hash2\n"),
- Git("log -1 hash2", "Log message\n"),
- Git("log -1 --format=%s hash2",
+ Cmd("git log -1 hash2", "Log message\n"),
+ ]
+ if manual:
+ expectations.append(RL("Y")) # Confirm last push.
+ expectations += [
+ Cmd("git log -1 --format=%s hash2",
"Version 3.4.5 (based on bleeding_edge revision r1234)\n"),
- Git("svn find-rev r1234", "hash3\n"),
- Git("checkout -f svn/bleeding_edge -- %s" % TEST_CONFIG[VERSION_FILE],
+ Cmd("git svn find-rev r1234", "hash3\n"),
+ Cmd("git checkout -f svn/bleeding_edge -- src/version.cc",
"", cb=self.WriteFakeVersionFile),
- Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f hash2 -- src/version.cc", "",
cb=self.WriteFakeVersionFile),
- Git("log --format=%H hash3..push_hash", "rev1\n"),
- Git("log -1 --format=%s rev1", "Log text 1.\n"),
- Git("log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
- Git("log -1 --format=%an rev1", "author1@chromium.org\n"),
- Git("svn fetch", "fetch result\n"),
- Git("checkout -f svn/bleeding_edge", ""),
- Git("diff svn/trunk push_hash", "patch content\n"),
- Git("svn find-rev push_hash", "123455\n"),
- Git("checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], "",
+ ]
+ if manual:
+ expectations.append(RL("")) # Increment build number.
+ expectations += [
+ Cmd("git log --format=%H hash3..push_hash", "rev1\n"),
+ Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
+ Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
+ Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
+ ]
+ if manual:
+ expectations.append(RL("")) # Open editor.
+ if not force:
+ expectations.append(
+ Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""))
+ expectations += [
+ Cmd("git svn fetch", "fetch result\n"),
+ Cmd("git checkout -f svn/bleeding_edge", ""),
+ Cmd("git diff svn/trunk push_hash", "patch content\n"),
+ Cmd("git svn find-rev push_hash", "123455\n"),
+ Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["TRUNKBRANCH"], "",
cb=ResetToTrunk),
- Git("apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""),
- Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[CHANGELOG_FILE], "",
+ Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
+ Cmd("git checkout -f svn/trunk -- %s" % TEST_CONFIG["CHANGELOG_FILE"], "",
cb=ResetChangeLog),
- Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f svn/trunk -- src/version.cc", "",
cb=self.WriteFakeVersionFile),
- Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "",
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckSVNCommit),
- Git("svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"),
- Git("svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
- Git("checkout -f some_branch", ""),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
- Git("branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""),
- ])
-
- # Expected keyboard input in manual mode:
+ ]
if manual:
- self.ExpectReadline([
- RL("Y"), # Confirm last push.
- RL(""), # Open editor.
- RL("Y"), # Increment build number.
- RL("Y"), # Sanity check.
- ])
-
- # Expected keyboard input in semi-automatic mode and forced mode:
- if not manual:
- self.ExpectReadline([])
+ expectations.append(RL("Y")) # Sanity check.
+ expectations += [
+ Cmd("git svn dcommit 2>&1", ""),
+ Cmd("git svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
+ Cmd("git checkout -f some_branch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["TRUNKBRANCH"], ""),
+ ]
+ self.Expect(expectations)
args = ["-a", "author@chromium.org", "--revision", "123455"]
if force: args.append("-f")
@@ -802,7 +804,7 @@ Performance and stability improvements on all platforms.""", commit)
else: args += ["-r", "reviewer@chromium.org"]
PushToTrunk(TEST_CONFIG, self).Run(args)
- cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
@@ -820,8 +822,26 @@ Performance and stability improvements on all platforms.""", commit)
def testPushToTrunkForced(self):
self._PushToTrunk(force=True)
- def _ChromiumRoll(self, force=False, manual=False):
- googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG[PERSISTFILE_BASENAME]
+ C_V8_22624_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
+
+"""
+
+ C_V8_123455_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123455 123
+
+"""
+
+ C_V8_123456_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
+
+"""
+
+ def testChromiumRoll(self):
+ googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG["PERSISTFILE_BASENAME"]
with open(googlers_mapping_py, "w") as f:
f.write("""
def list_to_dict(entries):
@@ -829,77 +849,61 @@ def list_to_dict(entries):
def get_list():
pass""")
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- if not os.path.exists(TEST_CONFIG[CHROMIUM]):
- os.makedirs(TEST_CONFIG[CHROMIUM])
- TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
- TEST_CONFIG[DEPS_FILE])
+ # Setup fake directory structures.
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
+ chrome_dir = TEST_CONFIG["CHROMIUM"]
+ os.makedirs(os.path.join(chrome_dir, "v8"))
- os.environ["EDITOR"] = "vi"
- force_flag = " -f" if not manual else ""
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- Git(("log -1 --format=%H --grep="
+ # Write fake deps file.
+ TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
+ os.path.join(chrome_dir, "DEPS"))
+ def WriteDeps():
+ TextToFile("Some line\n \"v8_revision\": \"22624\",\n some line",
+ os.path.join(chrome_dir, "DEPS"))
+
+ expectations = [
+ Cmd("git fetch origin", ""),
+ Cmd(("git log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "svn/trunk"), "push_hash\n"),
- Git("svn find-rev push_hash", "123455\n"),
- Git("log -1 --format=%s push_hash",
- "Version 3.22.5 (based on bleeding_edge revision r123454)\n"),
- Git("status -s -uno", ""),
- Git("checkout -f master", ""),
- Git("pull", ""),
- Git("checkout -b v8-roll-123455", ""),
- Git(("commit -am \"Update V8 to version 3.22.5 "
- "(based on bleeding_edge revision r123454).\n\n"
- "Please reply to the V8 sheriff c_name@chromium.org in "
- "case of problems.\n\nTBR=c_name@chromium.org\""),
- ""),
- Git(("cl upload --send-mail --email \"author@chromium.org\"%s"
- % force_flag), ""),
- ])
-
- self.ExpectReadURL([
+ "origin/candidates"), "push_hash\n"),
+ Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
+ Cmd("git log -1 --format=%s push_hash",
+ "Version 3.22.5 (based on bleeding_edge revision r22622)\n"),
URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
"document.write('g_name')"),
- ])
-
- # Expected keyboard input in manual mode:
- if manual:
- self.ExpectReadline([
- RL("c_name@chromium.org"), # Chromium reviewer.
- ])
-
- # Expected keyboard input in semi-automatic mode and forced mode:
- if not manual:
- self.ExpectReadline([])
+ Cmd("git status -s -uno", "", cwd=chrome_dir),
+ Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
+ Cmd("git pull", "", cwd=chrome_dir),
+ Cmd("git fetch origin", ""),
+ Cmd("git checkout -b v8-roll-22624", "", cwd=chrome_dir),
+ Cmd("roll-dep v8 22624", "rolled", cb=WriteDeps, cwd=chrome_dir),
+ Cmd(("git commit -am \"Update V8 to version 3.22.5 "
+ "(based on bleeding_edge revision r22622).\n\n"
+ "Please reply to the V8 sheriff c_name@chromium.org in "
+ "case of problems.\n\nTBR=c_name@chromium.org\" "
+ "--author \"author@chromium.org <author@chromium.org>\""),
+ "", cwd=chrome_dir),
+ Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
+ cwd=chrome_dir),
+ ]
+ self.Expect(expectations)
- args = ["-a", "author@chromium.org", "-c", TEST_CONFIG[CHROMIUM],
- "--sheriff", "--googlers-mapping", googlers_mapping_py]
- if force: args.append("-f")
- if manual: args.append("-m")
- else: args += ["-r", "reviewer@chromium.org"]
+ args = ["-a", "author@chromium.org", "-c", chrome_dir,
+ "--sheriff", "--googlers-mapping", googlers_mapping_py,
+ "-r", "reviewer@chromium.org"]
ChromiumRoll(TEST_CONFIG, self).Run(args)
- deps = FileToText(TEST_CONFIG[DEPS_FILE])
- self.assertTrue(re.search("\"v8_revision\": \"123455\"", deps))
-
- def testChromiumRollManual(self):
- self._ChromiumRoll(manual=True)
-
- def testChromiumRollSemiAutomatic(self):
- self._ChromiumRoll()
-
- def testChromiumRollForced(self):
- self._ChromiumRoll(force=True)
+ deps = FileToText(os.path.join(chrome_dir, "DEPS"))
+ self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
def testCheckLastPushRecently(self):
- self.ExpectGit([
- Git(("log -1 --format=%H --grep="
+ self.Expect([
+ Cmd(("git log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
"svn/trunk"), "hash2\n"),
- Git("log -1 --format=%s hash2",
+ Cmd("git log -1 --format=%s hash2",
"Version 3.4.5 (based on bleeding_edge revision r99)\n"),
])
@@ -910,45 +914,41 @@ def get_list():
AUTO_PUSH_ARGS))
def testAutoPush(self):
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+ TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
- self.ExpectReadURL([
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is throttled\"}"),
URL("https://v8-status.appspot.com/lkgr", Exception("Network problem")),
URL("https://v8-status.appspot.com/lkgr", "100"),
- ])
-
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- Git(("log -1 --format=%H --grep=\""
+ Cmd(("git log -1 --format=%H --grep=\""
"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
" svn/trunk"), "push_hash\n"),
- Git("log -1 --format=%s push_hash",
+ Cmd("git log -1 --format=%s push_hash",
"Version 3.4.5 (based on bleeding_edge revision r79)\n"),
])
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
state = json.loads(FileToText("%s-state.json"
- % TEST_CONFIG[PERSISTFILE_BASENAME]))
+ % TEST_CONFIG["PERSISTFILE_BASENAME"]))
self.assertEquals("100", state["lkgr"])
def testAutoPushStoppedBySettings(self):
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- TEST_CONFIG[SETTINGS_LOCATION] = self.MakeEmptyTempFile()
- TextToFile("{\"enable_auto_push\": false}", TEST_CONFIG[SETTINGS_LOCATION])
-
- self.ExpectReadURL([])
-
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+ TEST_CONFIG["SETTINGS_LOCATION"] = self.MakeEmptyTempFile()
+ TextToFile("{\"enable_auto_push\": false}",
+ TEST_CONFIG["SETTINGS_LOCATION"])
+
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
])
def RunAutoPush():
@@ -956,26 +956,23 @@ def get_list():
self.assertRaises(Exception, RunAutoPush)
def testAutoPushStoppedByTreeStatus(self):
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+ TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
- self.ExpectReadURL([
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is throttled (no push)\"}"),
])
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- ])
-
def RunAutoPush():
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
self.assertRaises(Exception, RunAutoPush)
def testAutoRollExistingRoll(self):
- self.ExpectReadURL([
+ self.Expect([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"},"
@@ -983,13 +980,13 @@ def get_list():
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
- self.assertEquals(1, result)
+ AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
+ self.assertEquals(0, result)
# Snippet from the original DEPS file.
FAKE_DEPS = """
vars = {
- "v8_revision": "123455",
+ "v8_revision": "abcd123455",
}
deps = {
"src/v8":
@@ -999,58 +996,54 @@ deps = {
"""
def testAutoRollUpToDate(self):
- self.ExpectReadURL([
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+ self.Expect([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
- URL("http://src.chromium.org/svn/trunk/src/DEPS",
- self.FAKE_DEPS),
- ])
-
- self.ExpectGit([
- Git(("log -1 --format=%H --grep="
+ Cmd(("git log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "svn/trunk"), "push_hash\n"),
- Git("svn find-rev push_hash", "123455\n"),
+ "origin/candidates"), "push_hash\n"),
+ Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
+ Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
- self.assertEquals(1, result)
+ AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
+ self.assertEquals(0, result)
def testAutoRoll(self):
- TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE] = self.MakeEmptyTempFile()
- TextToFile("fake key", TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE])
- self.ExpectReadURL([
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+ TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"] = self.MakeEmptyTempFile()
+ TextToFile("fake key", TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"])
+
+ self.Expect([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
- URL("http://src.chromium.org/svn/trunk/src/DEPS",
- self.FAKE_DEPS),
- ])
-
- self.ExpectGit([
- Git(("log -1 --format=%H --grep="
+ Cmd(("git log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "svn/trunk"), "push_hash\n"),
- Git("svn find-rev push_hash", "123456\n"),
+ "origin/candidates"), "push_hash\n"),
+ Cmd("git log -1 --format=%B push_hash", self.C_V8_123456_LOG),
+ Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM], "--roll"])
+ AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
self.assertEquals(0, result)
def testMergeToBranch(self):
- TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile()
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
self.WriteFakeVersionFile(build=5)
os.environ["EDITOR"] = "vi"
extra_patch = self.MakeEmptyTempFile()
def VerifyPatch(patch):
return lambda: self.assertEquals(patch,
- FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE]))
+ FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
msg = """Version 3.22.5.1 (merged r12345, r23456, r34567, r45678, r56789)
@@ -1069,94 +1062,225 @@ LOG=N
"""
def VerifySVNCommit():
- commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(msg, commit)
- version = FileToText(TEST_CONFIG[VERSION_FILE])
+ version = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* branch2\n"),
- Git("checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""),
- Git("log --format=%H --grep=\"Port r12345\" --reverse svn/bleeding_edge",
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd(("git log --format=%H --grep=\"Port r12345\" "
+ "--reverse svn/bleeding_edge"),
"hash1\nhash2"),
- Git("svn find-rev hash1 svn/bleeding_edge", "45678"),
- Git("log -1 --format=%s hash1", "Title1"),
- Git("svn find-rev hash2 svn/bleeding_edge", "23456"),
- Git("log -1 --format=%s hash2", "Title2"),
- Git("log --format=%H --grep=\"Port r23456\" --reverse svn/bleeding_edge",
+ Cmd("git svn find-rev hash1 svn/bleeding_edge", "45678"),
+ Cmd("git log -1 --format=%s hash1", "Title1"),
+ Cmd("git svn find-rev hash2 svn/bleeding_edge", "23456"),
+ Cmd("git log -1 --format=%s hash2", "Title2"),
+ Cmd(("git log --format=%H --grep=\"Port r23456\" "
+ "--reverse svn/bleeding_edge"),
""),
- Git("log --format=%H --grep=\"Port r34567\" --reverse svn/bleeding_edge",
+ Cmd(("git log --format=%H --grep=\"Port r34567\" "
+ "--reverse svn/bleeding_edge"),
"hash3"),
- Git("svn find-rev hash3 svn/bleeding_edge", "56789"),
- Git("log -1 --format=%s hash3", "Title3"),
- Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
+ Cmd("git svn find-rev hash3 svn/bleeding_edge", "56789"),
+ Cmd("git log -1 --format=%s hash3", "Title3"),
+ RL("Y"), # Automatically add corresponding ports (34567, 56789)?
+ Cmd("git svn find-rev r12345 svn/bleeding_edge", "hash4"),
# Simulate svn being down which stops the script.
- Git("svn find-rev r23456 svn/bleeding_edge", None),
+ Cmd("git svn find-rev r23456 svn/bleeding_edge", None),
# Restart script in the failing step.
- Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
- Git("svn find-rev r23456 svn/bleeding_edge", "hash2"),
- Git("svn find-rev r34567 svn/bleeding_edge", "hash3"),
- Git("svn find-rev r45678 svn/bleeding_edge", "hash1"),
- Git("svn find-rev r56789 svn/bleeding_edge", "hash5"),
- Git("log -1 --format=%s hash4", "Title4"),
- Git("log -1 --format=%s hash2", "Title2"),
- Git("log -1 --format=%s hash3", "Title3"),
- Git("log -1 --format=%s hash1", "Title1"),
- Git("log -1 --format=%s hash5", "Revert \"Something\""),
- Git("log -1 hash4", "Title4\nBUG=123\nBUG=234"),
- Git("log -1 hash2", "Title2\n BUG = v8:123,345"),
- Git("log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
- Git("log -1 hash1", "Title1\nBUG="),
- Git("log -1 hash5", "Revert \"Something\"\nBUG=none"),
- Git("log -1 -p hash4", "patch4"),
- Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ Cmd("git svn find-rev r12345 svn/bleeding_edge", "hash4"),
+ Cmd("git svn find-rev r23456 svn/bleeding_edge", "hash2"),
+ Cmd("git svn find-rev r34567 svn/bleeding_edge", "hash3"),
+ Cmd("git svn find-rev r45678 svn/bleeding_edge", "hash1"),
+ Cmd("git svn find-rev r56789 svn/bleeding_edge", "hash5"),
+ Cmd("git log -1 --format=%s hash4", "Title4"),
+ Cmd("git log -1 --format=%s hash2", "Title2"),
+ Cmd("git log -1 --format=%s hash3", "Title3"),
+ Cmd("git log -1 --format=%s hash1", "Title1"),
+ Cmd("git log -1 --format=%s hash5", "Revert \"Something\""),
+ Cmd("git log -1 hash4", "Title4\nBUG=123\nBUG=234"),
+ Cmd("git log -1 hash2", "Title2\n BUG = v8:123,345"),
+ Cmd("git log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
+ Cmd("git log -1 hash1", "Title1\nBUG="),
+ Cmd("git log -1 hash5", "Revert \"Something\"\nBUG=none"),
+ Cmd("git log -1 -p hash4", "patch4"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
"", cb=VerifyPatch("patch4")),
- Git("log -1 -p hash2", "patch2"),
- Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ Cmd("git log -1 -p hash2", "patch2"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
"", cb=VerifyPatch("patch2")),
- Git("log -1 -p hash3", "patch3"),
- Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ Cmd("git log -1 -p hash3", "patch3"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
"", cb=VerifyPatch("patch3")),
- Git("log -1 -p hash1", "patch1"),
- Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ Cmd("git log -1 -p hash1", "patch1"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
"", cb=VerifyPatch("patch1")),
- Git("log -1 -p hash5", "patch5\n"),
- Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ Cmd("git log -1 -p hash5", "patch5\n"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
"", cb=VerifyPatch("patch5\n")),
- Git("apply --index --reject \"%s\"" % extra_patch, ""),
- Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], ""),
- Git("cl upload --send-mail -r \"reviewer@chromium.org\"", ""),
- Git("checkout -f %s" % TEST_CONFIG[BRANCHNAME], ""),
- Git("cl presubmit", "Presubmit successfull\n"),
- Git("cl dcommit -f --bypass-hooks", "Closing issue\n", cb=VerifySVNCommit),
- Git("svn fetch", ""),
- Git(("log -1 --format=%%H --grep=\"%s\" svn/trunk"
- % msg.replace("\"", "\\\"")), "hash6"),
- Git("svn find-rev hash6", "1324"),
- Git(("copy -r 1324 https://v8.googlecode.com/svn/trunk "
- "https://v8.googlecode.com/svn/tags/3.22.5.1 -m "
- "\"Tagging version 3.22.5.1\""), ""),
- Git("checkout -f some_branch", ""),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+ Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
+ RL("Y"), # Automatically increment patch level?
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+ RL("reviewer@chromium.org"), # V8 reviewer.
+ Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
+ "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+ Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ RL("LGTM"), # Enter LGTM for V8 CL.
+ Cmd("git cl presubmit", "Presubmit successfull\n"),
+ Cmd("git cl dcommit -f --bypass-hooks", "Closing issue\n",
+ cb=VerifySVNCommit),
+ Cmd("git svn tag 3.22.5.1 -m \"Tagging version 3.22.5.1\"", ""),
+ Cmd("git checkout -f some_branch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
- self.ExpectReadline([
+ # r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
+ # ports of r12345. r56789 is the MIPS port of r34567.
+ args = ["-f", "-p", extra_patch, "--branch", "trunk",
+ "--vc-interface", "git_svn", "12345", "23456", "34567"]
+
+ # The first run of the script stops because of the svn being down.
+ self.assertRaises(GitFailedException,
+ lambda: MergeToBranch(TEST_CONFIG, self).Run(args))
+
+ # Test that state recovery after restarting the script works.
+ args += ["-s", "3"]
+ MergeToBranch(TEST_CONFIG, self).Run(args)
+
+ def testMergeToBranchNewGit(self):
+ TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+ self.WriteFakeVersionFile(build=5)
+ os.environ["EDITOR"] = "vi"
+ extra_patch = self.MakeEmptyTempFile()
+
+ def VerifyPatch(patch):
+ return lambda: self.assertEquals(patch,
+ FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
+
+ msg = """Version 3.22.5.1 (merged r12345, r23456, r34567, r45678, r56789)
+
+Title4
+
+Title2
+
+Title3
+
+Title1
+
+Revert "Something"
+
+BUG=123,234,345,456,567,v8:123
+LOG=N
+"""
+
+ def VerifySVNCommit():
+ commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+ self.assertEquals(msg, commit)
+ version = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
+ self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
+ self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
+ self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git checkout -b %s origin/candidates" %
+ TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd(("git log --format=%H --grep=\"Port r12345\" "
+ "--reverse origin/master"),
+ "hash1\nhash2"),
+ Cmd("git svn find-rev hash1 origin/master", "45678"),
+ Cmd("git log -1 --format=%s hash1", "Title1"),
+ Cmd("git svn find-rev hash2 origin/master", "23456"),
+ Cmd("git log -1 --format=%s hash2", "Title2"),
+ Cmd(("git log --format=%H --grep=\"Port r23456\" "
+ "--reverse origin/master"),
+ ""),
+ Cmd(("git log --format=%H --grep=\"Port r34567\" "
+ "--reverse origin/master"),
+ "hash3"),
+ Cmd("git svn find-rev hash3 origin/master", "56789"),
+ Cmd("git log -1 --format=%s hash3", "Title3"),
RL("Y"), # Automatically add corresponding ports (34567, 56789)?
+ Cmd("git svn find-rev r12345 origin/master",
+ "Partial-rebuilding bla\nDone rebuilding blub\nhash4"),
+ # Simulate svn being down which stops the script.
+ Cmd("git svn find-rev r23456 origin/master", None),
+ # Restart script in the failing step.
+ Cmd("git svn find-rev r12345 origin/master", "hash4"),
+ Cmd("git svn find-rev r23456 origin/master", "hash2"),
+ Cmd("git svn find-rev r34567 origin/master", "hash3"),
+ Cmd("git svn find-rev r45678 origin/master", "hash1"),
+ Cmd("git svn find-rev r56789 origin/master", "hash5"),
+ Cmd("git log -1 --format=%s hash4", "Title4"),
+ Cmd("git log -1 --format=%s hash2", "Title2"),
+ Cmd("git log -1 --format=%s hash3", "Title3"),
+ Cmd("git log -1 --format=%s hash1", "Title1"),
+ Cmd("git log -1 --format=%s hash5", "Revert \"Something\""),
+ Cmd("git log -1 hash4", "Title4\nBUG=123\nBUG=234"),
+ Cmd("git log -1 hash2", "Title2\n BUG = v8:123,345"),
+ Cmd("git log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
+ Cmd("git log -1 hash1", "Title1\nBUG="),
+ Cmd("git log -1 hash5", "Revert \"Something\"\nBUG=none"),
+ Cmd("git log -1 -p hash4", "patch4"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch4")),
+ Cmd("git log -1 -p hash2", "patch2"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch2")),
+ Cmd("git log -1 -p hash3", "patch3"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch3")),
+ Cmd("git log -1 -p hash1", "patch1"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch1")),
+ Cmd("git log -1 -p hash5", "patch5\n"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch5\n")),
+ Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
RL("Y"), # Automatically increment patch level?
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
RL("reviewer@chromium.org"), # V8 reviewer.
+ Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
+ "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+ Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
RL("LGTM"), # Enter LGTM for V8 CL.
+ Cmd("git cl presubmit", "Presubmit successfull\n"),
+ Cmd("git cl dcommit -f --bypass-hooks", "Closing issue\n",
+ cb=VerifySVNCommit),
+ Cmd("git svn tag 3.22.5.1 -m \"Tagging version 3.22.5.1\"", ""),
+ Cmd("git checkout -f some_branch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
# r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
# ports of r12345. r56789 is the MIPS port of r34567.
- args = ["-f", "-p", extra_patch, "--branch", "trunk", "12345", "23456",
- "34567"]
+ args = ["-f", "-p", extra_patch, "--branch", "candidates",
+ "--vc-interface", "git_read_svn_write", "12345", "23456", "34567"]
# The first run of the script stops because of the svn being down.
self.assertRaises(GitFailedException,
@@ -1194,17 +1318,37 @@ Changed paths:
Tagging version 3.28.40
------------------------------------------------------------------------
"""
+ c_hash2_commit_log = """Revert something.
+
+BUG=12345
+
+Reason:
+> Some reason.
+> Cr-Commit-Position: refs/heads/master@{#12345}
+> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4
+
+Review URL: https://codereview.chromium.org/12345
+
+Cr-Commit-Position: refs/heads/master@{#4567}
+git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
+
+"""
+ c_hash3_commit_log = """Simple.
+
+git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
+
+"""
json_output = self.MakeEmptyTempFile()
csv_output = self.MakeEmptyTempFile()
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
- TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- if not os.path.exists(TEST_CONFIG[CHROMIUM]):
- os.makedirs(TEST_CONFIG[CHROMIUM])
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ chrome_dir = TEST_CONFIG["CHROMIUM"]
+ chrome_v8_dir = os.path.join(chrome_dir, "v8")
+ os.makedirs(chrome_v8_dir)
def WriteDEPS(revision):
TextToFile("Line\n \"v8_revision\": \"%s\",\n line\n" % revision,
- TEST_CONFIG[DEPS_FILE])
+ os.path.join(chrome_dir, "DEPS"))
WriteDEPS(567)
def ResetVersion(minor, build, patch=0):
@@ -1215,81 +1359,94 @@ Tagging version 3.28.40
def ResetDEPS(revision):
return lambda: WriteDEPS(revision)
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* branch2\n"),
- Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
- Git("branch -r", " svn/3.21\n svn/3.3\n"),
- Git("reset --hard svn/3.3", ""),
- Git("log --format=%H", "hash1\nhash2"),
- Git("diff --name-only hash1 hash1^", ""),
- Git("diff --name-only hash2 hash2^", TEST_CONFIG[VERSION_FILE]),
- Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd("git branch -r", " svn/3.21\n svn/3.3\n"),
+ Cmd("git reset --hard svn/3.3", ""),
+ Cmd("git log --format=%H", "hash1\nhash2"),
+ Cmd("git diff --name-only hash1 hash1^", ""),
+ Cmd("git diff --name-only hash2 hash2^", VERSION_FILE),
+ Cmd("git checkout -f hash2 -- %s" % VERSION_FILE, "",
cb=ResetVersion(3, 1, 1)),
- Git("log -1 --format=%B hash2",
+ Cmd("git log -1 --format=%B hash2",
"Version 3.3.1.1 (merged 12)\n\nReview URL: fake.com\n"),
- Git("log -1 --format=%s hash2", ""),
- Git("svn find-rev hash2", "234"),
- Git("log -1 --format=%ci hash2", "18:15"),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git log -1 --format=%s hash2", ""),
+ Cmd("git svn find-rev hash2", "234"),
+ Cmd("git log -1 --format=%ci hash2", "18:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
cb=ResetVersion(22, 5)),
- Git("reset --hard svn/3.21", ""),
- Git("log --format=%H", "hash3\nhash4\nhash5\n"),
- Git("diff --name-only hash3 hash3^", TEST_CONFIG[VERSION_FILE]),
- Git("checkout -f hash3 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git reset --hard svn/3.21", ""),
+ Cmd("git log --format=%H", "hash3\nhash4\nhash5\n"),
+ Cmd("git diff --name-only hash3 hash3^", VERSION_FILE),
+ Cmd("git checkout -f hash3 -- %s" % VERSION_FILE, "",
cb=ResetVersion(21, 2)),
- Git("log -1 --format=%B hash3", ""),
- Git("log -1 --format=%s hash3", ""),
- Git("svn find-rev hash3", "123"),
- Git("log -1 --format=%ci hash3", "03:15"),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git log -1 --format=%B hash3", ""),
+ Cmd("git log -1 --format=%s hash3", ""),
+ Cmd("git svn find-rev hash3", "123"),
+ Cmd("git log -1 --format=%ci hash3", "03:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
cb=ResetVersion(22, 5)),
- Git("reset --hard svn/trunk", ""),
- Git("log --format=%H", "hash6\n"),
- Git("diff --name-only hash6 hash6^", TEST_CONFIG[VERSION_FILE]),
- Git("checkout -f hash6 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git reset --hard svn/trunk", ""),
+ Cmd("git log --format=%H", "hash6\n"),
+ Cmd("git diff --name-only hash6 hash6^", VERSION_FILE),
+ Cmd("git checkout -f hash6 -- %s" % VERSION_FILE, "",
cb=ResetVersion(22, 3)),
- Git("log -1 --format=%B hash6", ""),
- Git("log -1 --format=%s hash6", ""),
- Git("svn find-rev hash6", "345"),
- Git("log -1 --format=%ci hash6", ""),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git log -1 --format=%B hash6", ""),
+ Cmd("git log -1 --format=%s hash6", ""),
+ Cmd("git svn find-rev hash6", "345"),
+ Cmd("git log -1 --format=%ci hash6", ""),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
cb=ResetVersion(22, 5)),
- Git("reset --hard svn/bleeding_edge", ""),
- Git("log https://v8.googlecode.com/svn/tags -v --limit 20",
+ Cmd("git reset --hard svn/bleeding_edge", ""),
+ Cmd("svn log https://v8.googlecode.com/svn/tags -v --limit 20",
tag_response_text),
- Git("svn find-rev r22626", "hash_22626"),
- Git("svn find-rev hash_22626", "22626"),
- Git("log -1 --format=%ci hash_22626", "01:23"),
- Git("svn find-rev r22624", "hash_22624"),
- Git("svn find-rev hash_22624", "22624"),
- Git("log -1 --format=%ci hash_22624", "02:34"),
- Git("status -s -uno", ""),
- Git("checkout -f master", ""),
- Git("pull", ""),
- Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
- Git("log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\n"),
- Git("diff --name-only c_hash1 c_hash1^", ""),
- Git("diff --name-only c_hash2 c_hash2^", TEST_CONFIG[DEPS_FILE]),
- Git("checkout -f c_hash2 -- %s" % TEST_CONFIG[DEPS_FILE], "",
- cb=ResetDEPS(345)),
- Git("svn find-rev c_hash2", "4567"),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "",
- cb=ResetDEPS(567)),
- Git("branch -r", " weird/123\n branch-heads/7\n"),
- Git("checkout -f branch-heads/7 -- %s" % TEST_CONFIG[DEPS_FILE], "",
- cb=ResetDEPS(345)),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "",
- cb=ResetDEPS(567)),
- Git("checkout -f master", ""),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
- Git("checkout -f some_branch", ""),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+ Cmd("git svn find-rev r22626", "hash_22626"),
+ Cmd("git svn find-rev hash_22626", "22626"),
+ Cmd("git log -1 --format=%ci hash_22626", "01:23"),
+ Cmd("git svn find-rev r22624", "hash_22624"),
+ Cmd("git svn find-rev hash_22624", "22624"),
+ Cmd("git log -1 --format=%ci hash_22624", "02:34"),
+ Cmd("git status -s -uno", "", cwd=chrome_dir),
+ Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("git pull", "", cwd=chrome_dir),
+ Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
+ Cmd("git fetch origin", "", cwd=chrome_v8_dir),
+ Cmd("git log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\nc_hash3\n",
+ cwd=chrome_dir),
+ Cmd("git diff --name-only c_hash1 c_hash1^", "", cwd=chrome_dir),
+ Cmd("git diff --name-only c_hash2 c_hash2^", "DEPS", cwd=chrome_dir),
+ Cmd("git checkout -f c_hash2 -- DEPS", "",
+ cb=ResetDEPS("0123456789012345678901234567890123456789"),
+ cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
+ cwd=chrome_dir),
+ Cmd("git rev-list -n 1 0123456789012345678901234567890123456789",
+ "0123456789012345678901234567890123456789", cwd=chrome_v8_dir),
+ Cmd("git log -1 --format=%B 0123456789012345678901234567890123456789",
+ self.C_V8_22624_LOG, cwd=chrome_v8_dir),
+ Cmd("git diff --name-only c_hash3 c_hash3^", "DEPS", cwd=chrome_dir),
+ Cmd("git checkout -f c_hash3 -- DEPS", "", cb=ResetDEPS(345),
+ cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
+ cwd=chrome_dir),
+ Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+ cwd=chrome_dir),
+ Cmd("git branch -r", " weird/123\n branch-heads/7\n", cwd=chrome_dir),
+ Cmd("git checkout -f branch-heads/7 -- DEPS", "", cb=ResetDEPS(345),
+ cwd=chrome_dir),
+ Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+ cwd=chrome_dir),
+ Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
+ Cmd("git checkout -f some_branch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
- args = ["-c", TEST_CONFIG[CHROMIUM],
+ args = ["-c", TEST_CONFIG["CHROMIUM"],
"--json", json_output,
"--csv", csv_output,
"--max-releases", "1"]
@@ -1297,8 +1454,8 @@ Tagging version 3.28.40
# Check expected output.
csv = ("3.28.41,bleeding_edge,22626,,\r\n"
- "3.28.40,bleeding_edge,22624,,\r\n"
- "3.22.3,trunk,345,4567,\r\n"
+ "3.28.40,bleeding_edge,22624,4567,\r\n"
+ "3.22.3,trunk,345,3456:4566,\r\n"
"3.21.2,3.21,123,,\r\n"
"3.3.1.1,3.3,234,,12\r\n")
self.assertEquals(csv, FileToText(csv_output))
@@ -1309,11 +1466,12 @@ Tagging version 3.28.40
"review_link": "", "date": "01:23", "chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=22626"},
{"bleeding_edge": "22624", "patches_merged": "", "version": "3.28.40",
- "chromium_revision": "", "branch": "bleeding_edge", "revision": "22624",
- "review_link": "", "date": "02:34", "chromium_branch": "",
+ "chromium_revision": "4567", "branch": "bleeding_edge",
+ "revision": "22624", "review_link": "", "date": "02:34",
+ "chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=22624"},
{"bleeding_edge": "", "patches_merged": "", "version": "3.22.3",
- "chromium_revision": "4567", "branch": "trunk", "revision": "345",
+ "chromium_revision": "3456:4566", "branch": "trunk", "revision": "345",
"review_link": "", "date": "", "chromium_branch": "7",
"revision_link": "https://code.google.com/p/v8/source/detail?r=345"},
{"patches_merged": "", "bleeding_edge": "", "version": "3.21.2",
@@ -1328,8 +1486,7 @@ Tagging version 3.28.40
self.assertEquals(expected_json, json.loads(FileToText(json_output)))
- def testBumpUpVersion(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ def _bumpUpVersion(self):
self.WriteFakeVersionFile()
def ResetVersion(minor, build, patch=0):
@@ -1337,45 +1494,74 @@ Tagging version 3.28.40
build=build,
patch=patch)
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("checkout -f bleeding_edge", "", cb=ResetVersion(11, 4)),
- Git("pull", ""),
- Git("branch", ""),
- Git("checkout -f bleeding_edge", ""),
- Git("log -1 --format=%H", "latest_hash"),
- Git("diff --name-only latest_hash latest_hash^", ""),
- Git("checkout -f bleeding_edge", ""),
- Git("log --format=%H --grep=\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\"",
- "lkgr_hash"),
- Git("checkout -b auto-bump-up-version lkgr_hash", ""),
- Git("checkout -f bleeding_edge", ""),
- Git("branch", ""),
- Git("diff --name-only lkgr_hash lkgr_hash^", ""),
- Git("checkout -f master", "", cb=ResetVersion(11, 5)),
- Git("pull", ""),
- Git("checkout -b auto-bump-up-version bleeding_edge", "",
- cb=ResetVersion(11, 4)),
- Git("commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
- "TBR=author@chromium.org\"", ""),
- Git("cl upload --send-mail --email \"author@chromium.org\" -f "
- "--bypass-hooks", ""),
- Git("cl dcommit -f --bypass-hooks", ""),
- Git("checkout -f bleeding_edge", ""),
- Git("branch", "auto-bump-up-version\n* bleeding_edge"),
- Git("branch -D auto-bump-up-version", ""),
- ])
-
- self.ExpectReadURL([
+ return [
+ Cmd("git status -s -uno", ""),
+ Cmd("git checkout -f master", "", cb=ResetVersion(11, 4)),
+ Cmd("git pull", ""),
+ Cmd("git branch", ""),
+ Cmd("git checkout -f master", ""),
+ Cmd("git log -1 --format=%H", "latest_hash"),
+ Cmd("git diff --name-only latest_hash latest_hash^", ""),
URL("https://v8-status.appspot.com/lkgr", "12345"),
+ Cmd("git checkout -f master", ""),
+ Cmd(("git log --format=%H --grep="
+ "\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\""),
+ "lkgr_hash"),
+ Cmd("git checkout -b auto-bump-up-version lkgr_hash", ""),
+ Cmd("git checkout -f master", ""),
+ Cmd("git branch", "auto-bump-up-version\n* master"),
+ Cmd("git branch -D auto-bump-up-version", ""),
+ Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
+ Cmd("git checkout -f candidates", "", cb=ResetVersion(11, 5)),
+ Cmd("git pull", ""),
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is open\"}"),
- ])
+ Cmd("git checkout -b auto-bump-up-version master", "",
+ cb=ResetVersion(11, 4)),
+ Cmd("git commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
+ "TBR=author@chromium.org\" "
+ "--author \"author@chromium.org <author@chromium.org>\"", ""),
+ ]
+
+ def testBumpUpVersionGit(self):
+ expectations = self._bumpUpVersion()
+ expectations += [
+ Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+ "--bypass-hooks", ""),
+ Cmd("git cl dcommit -f --bypass-hooks", ""),
+ Cmd("git checkout -f master", ""),
+ Cmd("git branch", "auto-bump-up-version\n* master"),
+ Cmd("git branch -D auto-bump-up-version", ""),
+ ]
+ self.Expect(expectations)
BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
+ def testBumpUpVersionSvn(self):
+ svn_root = self.MakeEmptyTempDirectory()
+ expectations = self._bumpUpVersion()
+ expectations += [
+ Cmd("git diff HEAD^ HEAD", "patch content"),
+ Cmd("svn update", "", cwd=svn_root),
+ Cmd("svn status", "", cwd=svn_root),
+ Cmd("patch -d branches/bleeding_edge -p1 -i %s" %
+ TEST_CONFIG["PATCH_FILE"], "Applied patch...", cwd=svn_root),
+ Cmd("svn commit --non-interactive --username=author@chromium.org "
+ "--config-dir=[CONFIG_DIR] "
+ "-m \"[Auto-roll] Bump up version to 3.11.6.0\"",
+ "", cwd=svn_root),
+ Cmd("git checkout -f master", ""),
+ Cmd("git branch", "auto-bump-up-version\n* master"),
+ Cmd("git branch -D auto-bump-up-version", ""),
+ ]
+ self.Expect(expectations)
+
+ BumpUpVersion(TEST_CONFIG, self).Run(
+ ["-a", "author@chromium.org",
+ "--svn", svn_root,
+ "--svn-config", "[CONFIG_DIR]"])
+
def testAutoTag(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
def ResetVersion(minor, build, patch=0):
@@ -1383,93 +1569,91 @@ Tagging version 3.28.40
build=build,
patch=patch)
- self.ExpectGit([
- Git("status -s -uno", ""),
- Git("status -s -b -uno", "## some_branch\n"),
- Git("svn fetch", ""),
- Git("branch", " branch1\n* branch2\n"),
- Git("checkout -f master", ""),
- Git("svn rebase", ""),
- Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], "",
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git svn fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git checkout -f master", ""),
+ Cmd("git svn rebase", ""),
+ Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "",
cb=ResetVersion(4, 5)),
- Git("branch -r", "svn/tags/3.4.2\nsvn/tags/3.2.1.0\nsvn/branches/3.4"),
- Git("log --format=%H --grep=\"\\[Auto\\-roll\\] Bump up version to\"",
+ Cmd("git branch -r",
+ "svn/tags/3.4.2\nsvn/tags/3.2.1.0\nsvn/branches/3.4"),
+ Cmd(("git log --format=%H --grep="
+ "\"\\[Auto\\-roll\\] Bump up version to\""),
"hash125\nhash118\nhash111\nhash101"),
- Git("checkout -f hash125 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f hash125 -- %s" % VERSION_FILE, "",
cb=ResetVersion(4, 4)),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
cb=ResetVersion(4, 5)),
- Git("checkout -f hash118 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f hash118 -- %s" % VERSION_FILE, "",
cb=ResetVersion(4, 3)),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
cb=ResetVersion(4, 5)),
- Git("checkout -f hash111 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f hash111 -- %s" % VERSION_FILE, "",
cb=ResetVersion(4, 2)),
- Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
cb=ResetVersion(4, 5)),
- Git("svn find-rev hash118", "118"),
- Git("svn find-rev hash125", "125"),
- Git("svn find-rev r123", "hash123"),
- Git("log -1 --format=%at hash123", "1"),
- Git("reset --hard hash123", ""),
- Git("svn tag 3.4.3 -m \"Tagging version 3.4.3\"", ""),
- Git("checkout -f some_branch", ""),
- Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
- ])
-
- self.ExpectReadURL([
URL("https://v8-status.appspot.com/revisions?format=json",
"[{\"revision\": \"126\", \"status\": true},"
"{\"revision\": \"123\", \"status\": true},"
"{\"revision\": \"112\", \"status\": true}]"),
+ Cmd("git svn find-rev hash118", "118"),
+ Cmd("git svn find-rev hash125", "125"),
+ Cmd("git svn find-rev r123", "hash123"),
+ Cmd("git log -1 --format=%at hash123", "1"),
+ Cmd("git reset --hard hash123", ""),
+ Cmd("git svn tag 3.4.3 -m \"Tagging version 3.4.3\"", ""),
+ Cmd("git checkout -f some_branch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
AutoTag(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
# Test that we bail out if the last change was a version change.
def testBumpUpVersionBailout1(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self._state["latest"] = "latest_hash"
- self.ExpectGit([
- Git("diff --name-only latest_hash latest_hash^",
- TEST_CONFIG[VERSION_FILE]),
+ self.Expect([
+ Cmd("git diff --name-only latest_hash latest_hash^", VERSION_FILE),
])
- self.assertEquals(1,
+ self.assertEquals(0,
self.RunStep(BumpUpVersion, LastChangeBailout, ["--dry_run"]))
# Test that we bail out if the lkgr was a version change.
def testBumpUpVersionBailout2(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self._state["lkgr"] = "lkgr_hash"
- self.ExpectGit([
- Git("diff --name-only lkgr_hash lkgr_hash^", TEST_CONFIG[VERSION_FILE]),
+ self.Expect([
+ Cmd("git diff --name-only lkgr_hash lkgr_hash^", VERSION_FILE),
])
- self.assertEquals(1,
+ self.assertEquals(0,
self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
# Test that we bail out if the last version is already newer than the lkgr's
# version.
def testBumpUpVersionBailout3(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self._state["lkgr"] = "lkgr_hash"
self._state["lkgr_version"] = "3.22.4.0"
self._state["latest_version"] = "3.22.5.0"
- self.ExpectGit([
- Git("diff --name-only lkgr_hash lkgr_hash^", ""),
+ self.Expect([
+ Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
])
- self.assertEquals(1,
+ self.assertEquals(0,
self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
class SystemTest(unittest.TestCase):
def testReload(self):
+ options = ScriptsBase(
+ TEST_CONFIG, DEFAULT_SIDE_EFFECT_HANDLER, {}).MakeOptions([])
step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
+ options=options,
side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER)
body = step.Reload(
"""------------------------------------------------------------------------
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 6e9f5549d8..c8481e6838 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -28,6 +28,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from collections import OrderedDict
import itertools
import multiprocessing
import optparse
@@ -51,7 +52,33 @@ from testrunner.objects import context
ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = ["mjsunit", "fuzz-natives", "base-unittests",
- "cctest", "compiler-unittests", "message", "preparser"]
+ "cctest", "compiler-unittests", "heap-unittests",
+ "libplatform-unittests", "message", "preparser"]
+
+# Map of test name synonyms to lists of test suites. Should be ordered by
+# expected runtimes (suites with slow test cases first). These groups are
+# invoked in separate steps on the bots.
+TEST_MAP = {
+ "default": [
+ "mjsunit",
+ "fuzz-natives",
+ "cctest",
+ "message",
+ "preparser",
+ ],
+ "optimize_for_size": [
+ "mjsunit",
+ "cctest",
+ "webkit",
+ ],
+ "unittests": [
+ "compiler-unittests",
+ "heap-unittests",
+ "base-unittests",
+ "libplatform-unittests",
+ ],
+}
+
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
@@ -60,7 +87,7 @@ TIMEOUT_SCALEFACTOR = {"debug" : 4,
VARIANT_FLAGS = {
"default": [],
"stress": ["--stress-opt", "--always-opt"],
- "turbofan": ["--turbo-filter=*", "--always-opt"],
+ "turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
"nocrankshaft": ["--nocrankshaft"]}
VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
@@ -294,10 +321,15 @@ def ProcessOptions(options):
return reduce(lambda x, y: x + y, args) <= 1
if not excl(options.no_stress, options.stress_only, options.no_variants,
- bool(options.variants), options.quickcheck):
+ bool(options.variants)):
print("Use only one of --no-stress, --stress-only, --no-variants, "
- "--variants, or --quickcheck.")
+ "or --variants.")
return False
+ if options.quickcheck:
+ VARIANTS = ["default", "stress"]
+ options.flaky_tests = "skip"
+ options.slow_tests = "skip"
+ options.pass_fail_tests = "skip"
if options.no_stress:
VARIANTS = ["default", "nocrankshaft"]
if options.no_variants:
@@ -309,11 +341,6 @@ def ProcessOptions(options):
if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
return False
- if options.quickcheck:
- VARIANTS = ["default", "stress"]
- options.flaky_tests = "skip"
- options.slow_tests = "skip"
- options.pass_fail_tests = "skip"
if options.predictable:
VARIANTS = ["default"]
options.extra_flags.append("--predictable")
@@ -377,14 +404,23 @@ def Main():
suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ # Expand arguments with grouped tests. The args should reflect the list of
+ # suites as otherwise filters would break.
+ def ExpandTestGroups(name):
+ if name in TEST_MAP:
+ return [suite for suite in TEST_MAP[arg]]
+ else:
+ return [name]
+ args = reduce(lambda x, y: x + y,
+ [ExpandTestGroups(arg) for arg in args],
+ [])
+
if len(args) == 0:
suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
else:
- args_suites = set()
+ args_suites = OrderedDict() # Used as set
for arg in args:
- suite = arg.split(os.path.sep)[0]
- if not suite in args_suites:
- args_suites.add(suite)
+ args_suites[arg.split(os.path.sep)[0]] = True
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
diff --git a/deps/v8/tools/run_benchmarks.py b/deps/v8/tools/run_perf.py
index d6e9145dac..920c18d24a 100755
--- a/deps/v8/tools/run_benchmarks.py
+++ b/deps/v8/tools/run_perf.py
@@ -6,11 +6,11 @@
"""
Performance runner for d8.
-Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json
+Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
The suite json format is expected to be:
{
- "path": <relative path chunks to benchmark resources and main file>,
+ "path": <relative path chunks to perf resources and main file>,
"name": <optional suite name, file name is default>,
"archs": [<architecture name for which this suite is run>, ...],
"binary": <name of binary to run, default "d8">,
@@ -18,13 +18,13 @@ The suite json format is expected to be:
"run_count": <how often will this suite run (optional)>,
"run_count_XXX": <how often will this suite run for arch XXX (optional)>,
"resources": [<js file to be loaded before main>, ...]
- "main": <main js benchmark runner file>,
+ "main": <main js perf runner file>,
"results_regexp": <optional regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
- "benchmarks": [
+ "tests": [
{
- "name": <name of the benchmark>,
+ "name": <name of the trace>,
"results_regexp": <optional more specific regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
@@ -32,23 +32,23 @@ The suite json format is expected to be:
]
}
-The benchmarks field can also nest other suites in arbitrary depth. A suite
+The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
-benchmarks.
+tests.
A suite's results_regexp is expected to have one string place holder
-"%s" for the benchmark name. A benchmark's results_regexp overwrites suite
+"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
-specified, it is called after running the benchmarks like this (with a path
+specified, it is called after running the tests like this (with a path
relatve to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>
The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.
-A suite without "benchmarks" is considered a benchmark itself.
+A suite without "tests" is considered a performance test itself.
Full example (suite with one runner):
{
@@ -60,7 +60,7 @@ Full example (suite with one runner):
"main": "run.js",
"results_regexp": "^%s: (.+)$",
"units": "score",
- "benchmarks": [
+ "tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "NavierStokes",
@@ -75,7 +75,7 @@ Full example (suite with several runners):
"archs": ["ia32", "x64"],
"run_count": 5,
"units": "score",
- "benchmarks": [
+ "tests": [
{"name": "Richards",
"path": ["richards"],
"main": "run.js",
@@ -92,6 +92,7 @@ Path pieces are concatenated. D8 is always run with the suite's path as cwd.
"""
import json
+import math
import optparse
import os
import re
@@ -113,6 +114,18 @@ SUPPORTED_ARCHS = ["android_arm",
"x64",
"arm64"]
+GENERIC_RESULTS_RE = re.compile(
+ r"^Trace\(([^\)]+)\), Result\(([^\)]+)\), StdDev\(([^\)]+)\)$")
+
+
+def GeometricMean(values):
+ """Returns the geometric mean of a list of values.
+
+ The mean is calculated using log to avoid overflow.
+ """
+ values = map(float, values)
+ return str(math.exp(sum(map(math.log, values)) / len(values)))
+
class Results(object):
"""Place holder for result traces."""
@@ -137,7 +150,7 @@ class Results(object):
class Node(object):
- """Represents a node in the benchmark suite tree structure."""
+ """Represents a node in the suite tree structure."""
def __init__(self, *args):
self._children = []
@@ -151,6 +164,7 @@ class DefaultSentinel(Node):
super(DefaultSentinel, self).__init__()
self.binary = "d8"
self.run_count = 10
+ self.timeout = 60
self.path = []
self.graphs = []
self.flags = []
@@ -158,10 +172,11 @@ class DefaultSentinel(Node):
self.results_regexp = None
self.stddev_regexp = None
self.units = "score"
+ self.total = False
class Graph(Node):
- """Represents a benchmark suite definition.
+ """Represents a suite definition.
Can either be a leaf or an inner node that provides default values.
"""
@@ -184,7 +199,9 @@ class Graph(Node):
self.binary = suite.get("binary", parent.binary)
self.run_count = suite.get("run_count", parent.run_count)
self.run_count = suite.get("run_count_%s" % arch, self.run_count)
+ self.timeout = suite.get("timeout", parent.timeout)
self.units = suite.get("units", parent.units)
+ self.total = suite.get("total", parent.total)
# A regular expression for results. If the parent graph provides a
# regexp and the current suite has none, a string place holder for the
@@ -206,7 +223,7 @@ class Graph(Node):
class Trace(Graph):
- """Represents a leaf in the benchmark suite tree structure.
+ """Represents a leaf in the suite tree structure.
Handles collection of measurements.
"""
@@ -222,17 +239,17 @@ class Trace(Graph):
self.results.append(
re.search(self.results_regexp, stdout, re.M).group(1))
except:
- self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
+ self.errors.append("Regexp \"%s\" didn't match for test %s."
% (self.results_regexp, self.graphs[-1]))
try:
if self.stddev_regexp and self.stddev:
- self.errors.append("Benchmark %s should only run once since a stddev "
- "is provided by the benchmark." % self.graphs[-1])
+ self.errors.append("Test %s should only run once since a stddev "
+ "is provided by the test." % self.graphs[-1])
if self.stddev_regexp:
self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
except:
- self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
+ self.errors.append("Regexp \"%s\" didn't match for test %s."
% (self.stddev_regexp, self.graphs[-1]))
def GetResults(self):
@@ -245,16 +262,16 @@ class Trace(Graph):
class Runnable(Graph):
- """Represents a runnable benchmark suite definition (i.e. has a main file).
+ """Represents a runnable suite definition (i.e. has a main file).
"""
@property
def main(self):
- return self._suite["main"]
+ return self._suite.get("main", "")
def ChangeCWD(self, suite_path):
"""Changes the cwd to to path defined in the current graph.
- The benchmarks are supposed to be relative to the suite configuration.
+ The tests are supposed to be relative to the suite configuration.
"""
suite_dir = os.path.abspath(os.path.dirname(suite_path))
bench_dir = os.path.normpath(os.path.join(*self.path))
@@ -274,11 +291,32 @@ class Runnable(Graph):
for stdout in runner():
for trace in self._children:
trace.ConsumeOutput(stdout)
- return reduce(lambda r, t: r + t.GetResults(), self._children, Results())
-
+ res = reduce(lambda r, t: r + t.GetResults(), self._children, Results())
+
+ if not res.traces or not self.total:
+ return res
+
+ # Assume all traces have the same structure.
+ if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
+ res.errors.append("Not all traces have the same number of results.")
+ return res
+
+ # Calculate the geometric means for all traces. Above we made sure that
+ # there is at least one trace and that the number of results is the same
+ # for each trace.
+ n_results = len(res.traces[0]["results"])
+ total_results = [GeometricMean(t["results"][i] for t in res.traces)
+ for i in range(0, n_results)]
+ res.traces.append({
+ "graphs": self.graphs + ["Total"],
+ "units": res.traces[0]["units"],
+ "results": total_results,
+ "stddev": "",
+ })
+ return res
class RunnableTrace(Trace, Runnable):
- """Represents a runnable benchmark suite definition that is a leaf."""
+ """Represents a runnable suite definition that is a leaf."""
def __init__(self, suite, parent, arch):
super(RunnableTrace, self).__init__(suite, parent, arch)
@@ -289,6 +327,33 @@ class RunnableTrace(Trace, Runnable):
return self.GetResults()
+class RunnableGeneric(Runnable):
+ """Represents a runnable suite definition with generic traces."""
+ def __init__(self, suite, parent, arch):
+ super(RunnableGeneric, self).__init__(suite, parent, arch)
+
+ def Run(self, runner):
+ """Iterates over several runs and handles the output."""
+ traces = {}
+ for stdout in runner():
+ for line in stdout.strip().splitlines():
+ match = GENERIC_RESULTS_RE.match(line)
+ if match:
+ trace = match.group(1)
+ result = match.group(2)
+ stddev = match.group(3)
+ trace_result = traces.setdefault(trace, Results([{
+ "graphs": self.graphs + [trace],
+ "units": self.units,
+ "results": [],
+ "stddev": "",
+ }], []))
+ trace_result.traces[0]["results"].append(result)
+ trace_result.traces[0]["stddev"] = stddev
+
+ return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+
+
def MakeGraph(suite, arch, parent):
"""Factory method for making graph objects."""
if isinstance(parent, Runnable):
@@ -296,17 +361,21 @@ def MakeGraph(suite, arch, parent):
return Trace(suite, parent, arch)
elif suite.get("main"):
# A main file makes this graph runnable.
- if suite.get("benchmarks"):
- # This graph has subbenchmarks (traces).
+ if suite.get("tests"):
+ # This graph has subgraphs (traces).
return Runnable(suite, parent, arch)
else:
- # This graph has no subbenchmarks, it's a leaf.
+ # This graph has no subgraphs, it's a leaf.
return RunnableTrace(suite, parent, arch)
- elif suite.get("benchmarks"):
+ elif suite.get("generic"):
+ # This is a generic suite definition. It is either a runnable executable
+ # or has a main js file.
+ return RunnableGeneric(suite, parent, arch)
+ elif suite.get("tests"):
# This is neither a leaf nor a runnable.
return Graph(suite, parent, arch)
else: # pragma: no cover
- raise Exception("Invalid benchmark suite configuration.")
+ raise Exception("Invalid suite configuration.")
def BuildGraphs(suite, arch, parent=None):
@@ -320,7 +389,7 @@ def BuildGraphs(suite, arch, parent=None):
return None
graph = MakeGraph(suite, arch, parent)
- for subsuite in suite.get("benchmarks", []):
+ for subsuite in suite.get("tests", []):
BuildGraphs(subsuite, arch, graph)
parent.AppendChild(graph)
return graph
@@ -337,7 +406,7 @@ def FlattenRunnables(node):
for result in FlattenRunnables(child):
yield result
else: # pragma: no cover
- raise Exception("Invalid benchmark suite configuration.")
+ raise Exception("Invalid suite configuration.")
# TODO: Implement results_processor.
@@ -380,7 +449,7 @@ def Main(args):
path = os.path.abspath(path)
if not os.path.exists(path): # pragma: no cover
- results.errors.append("Benchmark file %s does not exist." % path)
+ results.errors.append("Configuration file %s does not exist." % path)
continue
with open(path) as f:
@@ -396,15 +465,18 @@ def Main(args):
def Runner():
"""Output generator that reruns several times."""
for i in xrange(0, max(1, runnable.run_count)):
- # TODO(machenbach): Make timeout configurable in the suite definition.
- # Allow timeout per arch like with run_count per arch.
- output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60)
+ # TODO(machenbach): Allow timeout per arch like with run_count per
+ # arch.
+ output = commands.Execute(runnable.GetCommand(shell_dir),
+ timeout=runnable.timeout)
print ">>> Stdout (#%d):" % (i + 1)
print output.stdout
if output.stderr: # pragma: no cover
# Print stderr for debugging.
print ">>> Stderr (#%d):" % (i + 1)
print output.stderr
+ if output.timed_out:
+ print ">>> Test timed out after %ss." % runnable.timeout
yield output.stdout
# Let runnable iterate over all runs and handle output.
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 0fd3f3a300..148697bfde 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -29,8 +29,10 @@
import imp
import os
+from . import commands
from . import statusfile
from . import utils
+from ..objects import testcase
class TestSuite(object):
@@ -41,11 +43,13 @@ class TestSuite(object):
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module("testcfg", f, pathname, description)
- suite = module.GetSuite(name, root)
+ return module.GetSuite(name, root)
+ except:
+ # Use default if no testcfg is present.
+ return GoogleTestSuite(name, root)
finally:
if f:
f.close()
- return suite
def __init__(self, name, root):
self.name = name # string
@@ -214,3 +218,40 @@ class TestSuite(object):
for t in self.tests:
self.total_duration += t.duration
return self.total_duration
+
+
+class GoogleTestSuite(TestSuite):
+ def __init__(self, name, root):
+ super(GoogleTestSuite, self).__init__(name, root)
+
+ def ListTests(self, context):
+ shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+ if utils.IsWindows():
+ shell += ".exe"
+ output = commands.Execute(context.command_prefix +
+ [shell, "--gtest_list_tests"] +
+ context.extra_flags)
+ if output.exit_code != 0:
+ print output.stdout
+ print output.stderr
+ raise Exception("Test executable failed to list the tests.")
+ tests = []
+ test_case = ''
+ for line in output.stdout.splitlines():
+ test_desc = line.strip().split()[0]
+ if test_desc.endswith('.'):
+ test_case = test_desc
+ elif test_case and test_desc:
+ test = testcase.TestCase(self, test_case + test_desc, dependency=None)
+ tests.append(test)
+ tests.sort()
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ return (testcase.flags + ["--gtest_filter=" + testcase.path] +
+ ["--gtest_random_seed=%s" % context.random_seed] +
+ ["--gtest_print_time=0"] +
+ context.mode_flags)
+
+ def shell(self):
+ return self.name
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 707fa24fbf..7bc21b1fc0 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -36,9 +36,7 @@ import urllib2
def GetSuitePaths(test_root):
- def IsSuite(path):
- return isdir(path) and exists(join(path, 'testcfg.py'))
- return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
+ return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
# Reads a file into an array of strings
diff --git a/deps/v8/tools/unittests/run_benchmarks_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 37a816e760..76e8d23e34 100644
--- a/deps/v8/tools/unittests/run_benchmarks_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -17,7 +17,7 @@ import unittest
# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.
-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")
+TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
V8_JSON = {
"path": ["."],
@@ -26,7 +26,7 @@ V8_JSON = {
"main": "run.js",
"run_count": 1,
"results_regexp": "^%s: (.+)$",
- "benchmarks": [
+ "tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
]
@@ -37,7 +37,7 @@ V8_NESTED_SUITES_JSON = {
"flags": ["--flag"],
"run_count": 1,
"units": "score",
- "benchmarks": [
+ "tests": [
{"name": "Richards",
"path": ["richards"],
"binary": "d7",
@@ -47,7 +47,7 @@ V8_NESTED_SUITES_JSON = {
"results_regexp": "^Richards: (.+)$"},
{"name": "Sub",
"path": ["sub"],
- "benchmarks": [
+ "tests": [
{"name": "Leaf",
"path": ["leaf"],
"run_count_x64": 3,
@@ -68,20 +68,29 @@ V8_NESTED_SUITES_JSON = {
]
}
-Output = namedtuple("Output", "stdout, stderr")
+V8_GENERIC_JSON = {
+ "path": ["."],
+ "binary": "cc",
+ "flags": ["--flag"],
+ "generic": True,
+ "run_count": 1,
+ "units": "ms",
+}
+
+Output = namedtuple("Output", "stdout, stderr, timed_out")
-class BenchmarksTest(unittest.TestCase):
+class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.base = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(cls.base)
cls._cov = coverage.coverage(
- include=([os.path.join(cls.base, "run_benchmarks.py")]))
+ include=([os.path.join(cls.base, "run_perf.py")]))
cls._cov.start()
- import run_benchmarks
+ import run_perf
from testrunner.local import commands
global commands
- global run_benchmarks
+ global run_perf
@classmethod
def tearDownClass(cls):
@@ -104,14 +113,17 @@ class BenchmarksTest(unittest.TestCase):
with open(self._test_input, "w") as f:
f.write(json.dumps(json_content))
- def _MockCommand(self, *args):
- # Fake output for each benchmark run.
- benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
+ def _MockCommand(self, *args, **kwargs):
+ # Fake output for each test run.
+ test_outputs = [Output(stdout=arg,
+ stderr=None,
+ timed_out=kwargs.get("timed_out", False))
+ for arg in args[1]]
def execute(*args, **kwargs):
- return benchmark_outputs.pop()
+ return test_outputs.pop()
commands.Execute = MagicMock(side_effect=execute)
- # Check that d8 is called from the correct cwd for each benchmark run.
+ # Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEquals(dirs.pop(), args[0])
@@ -125,7 +137,7 @@ class BenchmarksTest(unittest.TestCase):
self._test_input,
]
all_args += args
- return run_benchmarks.Main(all_args)
+ return run_perf.Main(all_args)
def _LoadResults(self):
with open(self._test_output) as f:
@@ -142,17 +154,18 @@ class BenchmarksTest(unittest.TestCase):
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()["errors"])
- def _VerifyMock(self, binary, *args):
+ def _VerifyMock(self, binary, *args, **kwargs):
arg = [path.join(path.dirname(self.base), binary)]
arg += args
- commands.Execute.assert_called_with(arg, timeout=60)
+ commands.Execute.assert_called_with(
+ arg, timeout=kwargs.get("timeout", 60))
- def _VerifyMockMultiple(self, *args):
+ def _VerifyMockMultiple(self, *args, **kwargs):
expected = []
for arg in args:
a = [path.join(path.dirname(self.base), arg[0])]
a += arg[1:]
- expected.append(((a,), {"timeout": 60}))
+ expected.append(((a,), {"timeout": kwargs.get("timeout", 60)}))
self.assertEquals(expected, commands.Execute.call_args_list)
def testOneRun(self):
@@ -187,8 +200,8 @@ class BenchmarksTest(unittest.TestCase):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
del test_input["results_regexp"]
- test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$"
- test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
+ test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
+ test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
@@ -265,11 +278,11 @@ class BenchmarksTest(unittest.TestCase):
{"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
])
self._VerifyErrors(
- ["Benchmark Richards should only run once since a stddev is provided "
- "by the benchmark.",
- "Benchmark DeltaBlue should only run once since a stddev is provided "
- "by the benchmark.",
- "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark "
+ ["Test Richards should only run once since a stddev is provided "
+ "by the test.",
+ "Test DeltaBlue should only run once since a stddev is provided "
+ "by the test.",
+ "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
"DeltaBlue."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -284,6 +297,35 @@ class BenchmarksTest(unittest.TestCase):
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+ def testBuildbotWithTotal(self):
+ test_input = dict(V8_JSON)
+ test_input["total"] = True
+ self._WriteTestInput(test_input)
+ self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
+ self.assertEquals(0, self._CallMain("--buildbot"))
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["1.234"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+ {"name": "Total", "results": ["3626.49109719"], "stddev": ""},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+
+ def testBuildbotWithTotalAndErrors(self):
+ test_input = dict(V8_JSON)
+ test_input["total"] = True
+ self._WriteTestInput(test_input)
+ self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
+ self.assertEquals(1, self._CallMain("--buildbot"))
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": [], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+ ])
+ self._VerifyErrors(
+ ["Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
+ "Not all traces have the same number of results."])
+ self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
@@ -293,5 +335,36 @@ class BenchmarksTest(unittest.TestCase):
{"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
])
self._VerifyErrors(
- ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])
+ ["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+ def testOneRunGeneric(self):
+ test_input = dict(V8_GENERIC_JSON)
+ self._WriteTestInput(test_input)
+ self._MockCommand(["."], [
+ "Trace(Test1), Result(1.234), StdDev(0.23)\n"
+ "Trace(Test2), Result(10657567), StdDev(106)\n"])
+ self.assertEquals(0, self._CallMain())
+ self._VerifyResults("test", "ms", [
+ {"name": "Test1", "results": ["1.234"], "stddev": "0.23"},
+ {"name": "Test2", "results": ["10657567"], "stddev": "106"},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
+
+ def testOneRunTimingOut(self):
+ test_input = dict(V8_JSON)
+ test_input["timeout"] = 70
+ self._WriteTestInput(test_input)
+ self._MockCommand(["."], [""], timed_out=True)
+ self.assertEquals(1, self._CallMain())
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": [], "stddev": ""},
+ {"name": "DeltaBlue", "results": [], "stddev": ""},
+ ])
+ self._VerifyErrors([
+ "Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
+ "Regexp \"^DeltaBlue: (.+)$\" didn't match for test DeltaBlue.",
+ ])
+ self._VerifyMock(
+ path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index c7f1ddc16e..5e3e841f8d 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -31,26 +31,26 @@
# List of known V8 instance types.
INSTANCE_TYPES = {
64: "STRING_TYPE",
- 68: "ASCII_STRING_TYPE",
+ 68: "ONE_BYTE_STRING_TYPE",
65: "CONS_STRING_TYPE",
- 69: "CONS_ASCII_STRING_TYPE",
+ 69: "CONS_ONE_BYTE_STRING_TYPE",
67: "SLICED_STRING_TYPE",
- 71: "SLICED_ASCII_STRING_TYPE",
+ 71: "SLICED_ONE_BYTE_STRING_TYPE",
66: "EXTERNAL_STRING_TYPE",
- 70: "EXTERNAL_ASCII_STRING_TYPE",
+ 70: "EXTERNAL_ONE_BYTE_STRING_TYPE",
74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
82: "SHORT_EXTERNAL_STRING_TYPE",
- 86: "SHORT_EXTERNAL_ASCII_STRING_TYPE",
+ 86: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
0: "INTERNALIZED_STRING_TYPE",
- 4: "ASCII_INTERNALIZED_STRING_TYPE",
+ 4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
1: "CONS_INTERNALIZED_STRING_TYPE",
- 5: "CONS_ASCII_INTERNALIZED_STRING_TYPE",
+ 5: "CONS_ONE_BYTE_INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
- 6: "EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
+ 6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
- 22: "SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
+ 22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
129: "MAP_TYPE",
@@ -135,7 +135,7 @@ KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
- 0x080f9: (4, "AsciiInternalizedStringMap"),
+ 0x080f9: (4, "OneByteInternalizedStringMap"),
0x08121: (179, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
0x08171: (137, "FreeSpaceMap"),
@@ -153,28 +153,28 @@ KNOWN_MAPS = {
0x08351: (179, "HashTableMap"),
0x08379: (128, "SymbolMap"),
0x083a1: (64, "StringMap"),
- 0x083c9: (68, "AsciiStringMap"),
+ 0x083c9: (68, "OneByteStringMap"),
0x083f1: (65, "ConsStringMap"),
- 0x08419: (69, "ConsAsciiStringMap"),
+ 0x08419: (69, "ConsOneByteStringMap"),
0x08441: (67, "SlicedStringMap"),
- 0x08469: (71, "SlicedAsciiStringMap"),
+ 0x08469: (71, "SlicedOneByteStringMap"),
0x08491: (66, "ExternalStringMap"),
0x084b9: (74, "ExternalStringWithOneByteDataMap"),
- 0x084e1: (70, "ExternalAsciiStringMap"),
+ 0x084e1: (70, "ExternalOneByteStringMap"),
0x08509: (82, "ShortExternalStringMap"),
0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
0x08559: (0, "InternalizedStringMap"),
0x08581: (1, "ConsInternalizedStringMap"),
- 0x085a9: (5, "ConsAsciiInternalizedStringMap"),
+ 0x085a9: (5, "ConsOneByteInternalizedStringMap"),
0x085d1: (2, "ExternalInternalizedStringMap"),
0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x08621: (6, "ExternalAsciiInternalizedStringMap"),
+ 0x08621: (6, "ExternalOneByteInternalizedStringMap"),
0x08649: (18, "ShortExternalInternalizedStringMap"),
0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08699: (22, "ShortExternalAsciiInternalizedStringMap"),
- 0x086c1: (86, "ShortExternalAsciiStringMap"),
+ 0x08699: (22, "ShortExternalOneByteInternalizedStringMap"),
+ 0x086c1: (86, "ShortExternalOneByteStringMap"),
0x086e9: (64, "UndetectableStringMap"),
- 0x08711: (68, "UndetectableAsciiStringMap"),
+ 0x08711: (68, "UndetectableOneByteStringMap"),
0x08739: (138, "ExternalInt8ArrayMap"),
0x08761: (139, "ExternalUint8ArrayMap"),
0x08789: (140, "ExternalInt16ArrayMap"),
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 64a6f4c985..0872fb9dff 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -1,8 +1,8 @@
-You can modify this file to create no-op changelists.
+You can modify this file to create no-op changelists...
Try to write something funny. And please don't add trailing whitespace.
A Smi walks into a bar and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them and......................
+The Smi looked at them when a crazy v8-autoroll account showed up.